From 48f77f3313ae2cd9377105dc1e807458b8ae00e0 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 2 Nov 2024 11:14:25 +0800 Subject: [PATCH 01/58] add core modules of paddle backend and water/se_e2_a example --- .pre-commit-config.yaml | 52 +++++++++++++++++++------------------- deepmd/main.py | 3 ++- deepmd/utils/batch_size.py | 20 +++++++++++++-- deepmd/utils/data.py | 15 +++++++++++ 4 files changed, 61 insertions(+), 29 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a74cf86aec..62957a5ac5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,13 +57,13 @@ repos: - id: clang-format exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc # markdown, yaml, CSS, javascript - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 - hooks: - - id: prettier - types_or: [markdown, yaml, css] - # workflow files cannot be modified by pre-commit.ci - exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + # - repo: https://github.com/pre-commit/mirrors-prettier + # rev: v4.0.0-alpha.8 + # hooks: + # - id: prettier + # types_or: [markdown, yaml, css] + # # workflow files cannot be modified by pre-commit.ci + # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.9.0-1 @@ -75,25 +75,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - - repo: https://github.com/njzjz/mirrors-bibtex-tidy - rev: v1.13.0 - hooks: - - id: bibtex-tidy - args: - - --curly - - --numeric - - --align=13 - - --blank-lines - # disable sort: the order of keys and fields has explict meanings - #- --sort=key - - --duplicates=key,doi,citation,abstract - - --merge=combine - #- --sort-fields - #- --strip-comments - - --trailing-commas - - --encode-urls - - --remove-empty-fields - - --wrap=80 + # - repo: https://github.com/njzjz/mirrors-bibtex-tidy + # rev: v1.13.0 + # hooks: + # - id: bibtex-tidy + # args: + # - 
--curly + # - --numeric + # - --align=13 + # - --blank-lines + # # disable sort: the order of keys and fields has explict meanings + # #- --sort=key + # - --duplicates=key,doi,citation,abstract + # - --merge=combine + # #- --sort-fields + # #- --strip-comments + # - --trailing-commas + # - --encode-urls + # - --remove-empty-fields + # - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/deepmd/main.py b/deepmd/main.py index 777bfd3aa3..4b2d88f598 100644 --- a/deepmd/main.py +++ b/deepmd/main.py @@ -102,9 +102,10 @@ def main_parser() -> argparse.ArgumentParser: formatter_class=RawTextArgumentDefaultsHelpFormatter, epilog=textwrap.dedent( """\ - Use --tf or --pt to choose the backend: + Use --tf, --pt or --pd to choose the backend: dp --tf train input.json dp --pt train input.json + dp --pd train input.json """ ), ) diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index 8fe67ad6fc..0eb6ac9518 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -181,7 +181,11 @@ def execute_with_batch_size( *[ ( vv[start_index:end_index, ...] - if array_api_compat.is_array_api_obj(vv) and vv.ndim > 1 + if ( + array_api_compat.is_array_api_obj(vv) + and vv.ndim > 1 + or str(vv.__class__) == "" + ) else vv ) for vv in args @@ -189,7 +193,11 @@ def execute_with_batch_size( **{ kk: ( vv[start_index:end_index, ...] - if array_api_compat.is_array_api_obj(vv) and vv.ndim > 1 + if ( + array_api_compat.is_array_api_obj(vv) + and vv.ndim > 1 + or str(vv.__class__) == "" + ) else vv ) for kk, vv in kwargs.items() @@ -228,6 +236,14 @@ def concate_result(r): if array_api_compat.is_array_api_obj(r[0]): xp = array_api_compat.array_namespace(r[0]) ret = xp.concat(r, axis=0) + elif str(r[0].__class__) == "": + try: + import paddle + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "The 'paddlepaddle' is required but not installed." 
+ ) from e + ret = paddle.concat(r, axis=0) else: raise RuntimeError(f"Unexpected result type {type(r[0])}") return ret diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 5d324afb95..7f44477d6b 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -248,6 +248,21 @@ def get_item_torch(self, index: int) -> dict: frame["fid"] = index return frame + def get_item_paddle(self, index: int) -> dict: + """Get a single frame data . The frame is picked from the data system by index. The index is coded across all the sets. + + Parameters + ---------- + index + index of the frame + """ + i = bisect.bisect_right(self.prefix_sum, index) + frames = self._load_set(self.dirs[i]) + frame = self._get_subdata(frames, index - self.prefix_sum[i]) + frame = self.reformat_data_torch(frame) + frame["fid"] = index + return frame + def get_batch(self, batch_size: int) -> dict: """Get a batch of data with `batch_size` frames. The frames are randomly picked from the data system. From 2082a59ed2b21ed242ac2f9b2faf2167f0148e36 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 2 Nov 2024 18:18:37 +0800 Subject: [PATCH 02/58] add paddle code in consistent test --- backend/find_paddle.py | 133 ++ deepmd/backend/paddle.py | 124 ++ deepmd/pd/cxx_op.py | 25 + deepmd/pd/entrypoints/__init__.py | 1 + deepmd/pd/entrypoints/main.py | 586 ++++++++ deepmd/pd/infer/__init__.py | 1 + deepmd/pd/infer/deep_eval.py | 621 +++++++++ deepmd/pd/infer/inference.py | 66 + deepmd/pd/loss/__init__.py | 12 + deepmd/pd/loss/ener.py | 428 ++++++ deepmd/pd/loss/loss.py | 40 + deepmd/pd/model/__init__.py | 6 + deepmd/pd/model/atomic_model/__init__.py | 31 + .../model/atomic_model/base_atomic_model.py | 578 ++++++++ .../pd/model/atomic_model/dp_atomic_model.py | 333 +++++ .../model/atomic_model/energy_atomic_model.py | 20 + deepmd/pd/model/backbone/__init__.py | 8 + deepmd/pd/model/backbone/backbone.py | 12 + deepmd/pd/model/descriptor/__init__.py | 24 + 
deepmd/pd/model/descriptor/base_descriptor.py | 8 + deepmd/pd/model/descriptor/descriptor.py | 230 ++++ deepmd/pd/model/descriptor/env_mat.py | 87 ++ deepmd/pd/model/descriptor/se_a.py | 680 +++++++++ deepmd/pd/model/model/__init__.py | 144 ++ deepmd/pd/model/model/dp_model.py | 54 + deepmd/pd/model/model/ener_model.py | 135 ++ deepmd/pd/model/model/frozen.py | 182 +++ deepmd/pd/model/model/make_model.py | 587 ++++++++ deepmd/pd/model/model/model.py | 55 + deepmd/pd/model/model/transform_output.py | 268 ++++ deepmd/pd/model/network/__init__.py | 1 + deepmd/pd/model/network/init.py | 458 +++++++ deepmd/pd/model/network/layernorm.py | 163 +++ deepmd/pd/model/network/mlp.py | 328 +++++ deepmd/pd/model/network/network.py | 555 ++++++++ deepmd/pd/model/task/__init__.py | 22 + deepmd/pd/model/task/base_fitting.py | 8 + deepmd/pd/model/task/ener.py | 257 ++++ deepmd/pd/model/task/fitting.py | 499 +++++++ deepmd/pd/model/task/invar_fitting.py | 181 +++ deepmd/pd/model/task/task.py | 1 + deepmd/pd/model/task/type_predict.py | 47 + deepmd/pd/train/__init__.py | 1 + deepmd/pd/train/training.py | 1215 +++++++++++++++++ deepmd/pd/train/wrapper.py | 222 +++ deepmd/pd/utils/__init__.py | 11 + deepmd/pd/utils/ase_calc.py | 6 + deepmd/pd/utils/auto_batch_size.py | 60 + deepmd/pd/utils/cache.py | 31 + deepmd/pd/utils/dataloader.py | 339 +++++ deepmd/pd/utils/dataset.py | 57 + deepmd/pd/utils/decomp.py | 247 ++++ deepmd/pd/utils/dp_random.py | 14 + deepmd/pd/utils/env.py | 109 ++ deepmd/pd/utils/env_mat_stat.py | 235 ++++ deepmd/pd/utils/exclude_mask.py | 164 +++ deepmd/pd/utils/finetune.py | 200 +++ deepmd/pd/utils/learning_rate.py | 53 + deepmd/pd/utils/multi_task.py | 162 +++ deepmd/pd/utils/neighbor_stat.py | 197 +++ deepmd/pd/utils/nlist.py | 534 ++++++++ deepmd/pd/utils/no_use_init.py | 515 +++++++ deepmd/pd/utils/plugin.py | 16 + deepmd/pd/utils/preprocess.py | 314 +++++ deepmd/pd/utils/region.py | 133 ++ deepmd/pd/utils/serialization.py | 30 + deepmd/pd/utils/stat.py | 604 
++++++++ deepmd/pd/utils/update_sel.py | 14 + deepmd/pd/utils/utils.py | 176 +++ source/tests/consistent/common.py | 70 +- source/tests/consistent/descriptor/common.py | 31 + .../consistent/descriptor/test_se_e2_a.py | 26 + source/tests/consistent/fitting/common.py | 3 + source/tests/consistent/fitting/test_ener.py | 42 + source/tests/consistent/model/common.py | 14 + source/tests/consistent/model/test_ener.py | 12 + source/tests/consistent/test_activation.py | 19 +- .../tests/consistent/test_type_embedding.py | 15 + 78 files changed, 13886 insertions(+), 4 deletions(-) create mode 100644 backend/find_paddle.py create mode 100644 deepmd/backend/paddle.py create mode 100644 deepmd/pd/cxx_op.py create mode 100644 deepmd/pd/entrypoints/__init__.py create mode 100644 deepmd/pd/entrypoints/main.py create mode 100644 deepmd/pd/infer/__init__.py create mode 100644 deepmd/pd/infer/deep_eval.py create mode 100644 deepmd/pd/infer/inference.py create mode 100644 deepmd/pd/loss/__init__.py create mode 100644 deepmd/pd/loss/ener.py create mode 100644 deepmd/pd/loss/loss.py create mode 100644 deepmd/pd/model/__init__.py create mode 100644 deepmd/pd/model/atomic_model/__init__.py create mode 100644 deepmd/pd/model/atomic_model/base_atomic_model.py create mode 100644 deepmd/pd/model/atomic_model/dp_atomic_model.py create mode 100644 deepmd/pd/model/atomic_model/energy_atomic_model.py create mode 100644 deepmd/pd/model/backbone/__init__.py create mode 100644 deepmd/pd/model/backbone/backbone.py create mode 100644 deepmd/pd/model/descriptor/__init__.py create mode 100644 deepmd/pd/model/descriptor/base_descriptor.py create mode 100644 deepmd/pd/model/descriptor/descriptor.py create mode 100644 deepmd/pd/model/descriptor/env_mat.py create mode 100644 deepmd/pd/model/descriptor/se_a.py create mode 100644 deepmd/pd/model/model/__init__.py create mode 100644 deepmd/pd/model/model/dp_model.py create mode 100644 deepmd/pd/model/model/ener_model.py create mode 100644 
deepmd/pd/model/model/frozen.py create mode 100644 deepmd/pd/model/model/make_model.py create mode 100644 deepmd/pd/model/model/model.py create mode 100644 deepmd/pd/model/model/transform_output.py create mode 100644 deepmd/pd/model/network/__init__.py create mode 100644 deepmd/pd/model/network/init.py create mode 100644 deepmd/pd/model/network/layernorm.py create mode 100644 deepmd/pd/model/network/mlp.py create mode 100644 deepmd/pd/model/network/network.py create mode 100644 deepmd/pd/model/task/__init__.py create mode 100644 deepmd/pd/model/task/base_fitting.py create mode 100644 deepmd/pd/model/task/ener.py create mode 100644 deepmd/pd/model/task/fitting.py create mode 100644 deepmd/pd/model/task/invar_fitting.py create mode 100644 deepmd/pd/model/task/task.py create mode 100644 deepmd/pd/model/task/type_predict.py create mode 100644 deepmd/pd/train/__init__.py create mode 100644 deepmd/pd/train/training.py create mode 100644 deepmd/pd/train/wrapper.py create mode 100644 deepmd/pd/utils/__init__.py create mode 100644 deepmd/pd/utils/ase_calc.py create mode 100644 deepmd/pd/utils/auto_batch_size.py create mode 100644 deepmd/pd/utils/cache.py create mode 100644 deepmd/pd/utils/dataloader.py create mode 100644 deepmd/pd/utils/dataset.py create mode 100644 deepmd/pd/utils/decomp.py create mode 100644 deepmd/pd/utils/dp_random.py create mode 100644 deepmd/pd/utils/env.py create mode 100644 deepmd/pd/utils/env_mat_stat.py create mode 100644 deepmd/pd/utils/exclude_mask.py create mode 100644 deepmd/pd/utils/finetune.py create mode 100644 deepmd/pd/utils/learning_rate.py create mode 100644 deepmd/pd/utils/multi_task.py create mode 100644 deepmd/pd/utils/neighbor_stat.py create mode 100644 deepmd/pd/utils/nlist.py create mode 100644 deepmd/pd/utils/no_use_init.py create mode 100644 deepmd/pd/utils/plugin.py create mode 100644 deepmd/pd/utils/preprocess.py create mode 100644 deepmd/pd/utils/region.py create mode 100644 deepmd/pd/utils/serialization.py create mode 100644 
deepmd/pd/utils/stat.py create mode 100644 deepmd/pd/utils/update_sel.py create mode 100644 deepmd/pd/utils/utils.py diff --git a/backend/find_paddle.py b/backend/find_paddle.py new file mode 100644 index 0000000000..bc54cdcaa5 --- /dev/null +++ b/backend/find_paddle.py @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import site +from functools import ( + lru_cache, +) +from importlib.machinery import ( + FileFinder, +) +from importlib.util import ( + find_spec, +) +from pathlib import ( + Path, +) +from sysconfig import ( + get_path, +) +from typing import ( + Optional, + Union, +) + + +@lru_cache +def find_paddle() -> tuple[Optional[str], list[str]]: + """Find PaddlePadle library. + + Tries to find PaddlePadle in the order of: + + 1. Environment variable `PADDLE_ROOT` if set + 2. The current Python environment. + 3. user site packages directory if enabled + 4. system site packages directory (purelib) + + Considering the default PaddlePadle package still uses old CXX11 ABI, we + cannot install it automatically. + + Returns + ------- + str, optional + PaddlePadle library path if found. + list of str + Paddle requirement if not found. Empty if found. 
+ """ + if os.environ.get("DP_ENABLE_PADDLE", "0") == "0": + return None, [] + requires = [] + pd_spec = None + + if (pd_spec is None or not pd_spec) and os.environ.get("PADDLE_ROOT") is not None: + site_packages = Path(os.environ.get("PADDLE_ROOT")).parent.absolute() + pd_spec = FileFinder(str(site_packages)).find_spec("paddle") + + # get paddle spec + # note: isolated build will not work for backend + if pd_spec is None or not pd_spec: + pd_spec = find_spec("paddle") + + if not pd_spec and site.ENABLE_USER_SITE: + # first search TF from user site-packages before global site-packages + site_packages = site.getusersitepackages() + if site_packages: + pd_spec = FileFinder(site_packages).find_spec("paddle") + + if not pd_spec: + # purelib gets site-packages path + site_packages = get_path("purelib") + if site_packages: + pd_spec = FileFinder(site_packages).find_spec("paddle") + + # get install dir from spec + try: + pd_install_dir = pd_spec.submodule_search_locations[0] # type: ignore + # AttributeError if ft_spec is None + # TypeError if submodule_search_locations are None + # IndexError if submodule_search_locations is an empty list + except (AttributeError, TypeError, IndexError): + pd_install_dir = None + requires.extend(get_pd_requirement()["paddle"]) + return pd_install_dir, requires + + +@lru_cache +def get_pd_requirement(pd_version: str = "") -> dict: + """Get PaddlePadle requirement when Paddle is not installed. + + If pd_version is not given and the environment variable `PADDLE_VERSION` is set, use it as the requirement. + + Parameters + ---------- + pd_version : str, optional + Paddle version + + Returns + ------- + dict + PaddlePadle requirement. 
+ """ + if pd_version is None: + return {"paddle": []} + if pd_version == "": + pd_version = os.environ.get("PADDLE_VERSION", "") + + return { + "paddle": [ + "paddlepaddle>=3.0.0b1" if pd_version != "" else "paddlepaddle>=3.0.0b1", + ], + } + + +@lru_cache +def get_pd_version(pd_path: Optional[Union[str, Path]]) -> str: + """Get Paddle version from a Paddle Python library path. + + Parameters + ---------- + pd_path : str or Path + Paddle Python library path, e.g. "/python3.10/site-packages/paddle/" + + Returns + ------- + str + version + """ + if pd_path is None or pd_path == "": + return "" + version_file = Path(pd_path) / "version" / "__init__.py" + spec = importlib.util.spec_from_file_location("paddle.version", version_file) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module.full_version diff --git a/deepmd/backend/paddle.py b/deepmd/backend/paddle.py new file mode 100644 index 0000000000..b1f664e76a --- /dev/null +++ b/deepmd/backend/paddle.py @@ -0,0 +1,124 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from importlib.util import ( + find_spec, +) +from typing import ( + TYPE_CHECKING, + Callable, + ClassVar, +) + +from deepmd.backend.backend import ( + Backend, +) + +if TYPE_CHECKING: + from argparse import ( + Namespace, + ) + + from deepmd.infer.deep_eval import ( + DeepEvalBackend, + ) + from deepmd.utils.neighbor_stat import ( + NeighborStat, + ) + + +@Backend.register("pd") +@Backend.register("paddle") +class PaddleBackend(Backend): + """Paddle backend.""" + + name = "Paddle" + """The formal name of the backend.""" + features: ClassVar[Backend.Feature] = ( + Backend.Feature.ENTRY_POINT + | Backend.Feature.DEEP_EVAL + | Backend.Feature.NEIGHBOR_STAT + | Backend.Feature.IO + ) + """The features of the backend.""" + suffixes: ClassVar[list[str]] = [".json", ".pd"] + """The suffixes of the backend.""" + + def is_available(self) -> bool: + """Check if the backend is available. 
+ + Returns + ------- + bool + Whether the backend is available. + """ + return find_spec("paddle") is not None + + @property + def entry_point_hook(self) -> Callable[["Namespace"], None]: + """The entry point hook of the backend. + + Returns + ------- + Callable[[Namespace], None] + The entry point hook of the backend. + """ + from deepmd.pd.entrypoints.main import main as deepmd_main + + return deepmd_main + + @property + def deep_eval(self) -> type["DeepEvalBackend"]: + """The Deep Eval backend of the backend. + + Returns + ------- + type[DeepEvalBackend] + The Deep Eval backend of the backend. + """ + from deepmd.pd.infer.deep_eval import DeepEval as DeepEvalPD + + return DeepEvalPD + + @property + def neighbor_stat(self) -> type["NeighborStat"]: + """The neighbor statistics of the backend. + + Returns + ------- + type[NeighborStat] + The neighbor statistics of the backend. + """ + from deepmd.pd.utils.neighbor_stat import ( + NeighborStat, + ) + + return NeighborStat + + @property + def serialize_hook(self) -> Callable[[str], dict]: + """The serialize hook to convert the model file to a dictionary. + + Returns + ------- + Callable[[str], dict] + The serialize hook of the backend. + """ + from deepmd.pd.utils.serialization import ( + serialize_from_file, + ) + + return serialize_from_file + + @property + def deserialize_hook(self) -> Callable[[str, dict], None]: + """The deserialize hook to convert the dictionary to a model file. + + Returns + ------- + Callable[[str, dict], None] + The deserialize hook of the backend. + """ + from deepmd.pd.utils.serialization import ( + deserialize_to_file, + ) + + return deserialize_to_file diff --git a/deepmd/pd/cxx_op.py b/deepmd/pd/cxx_op.py new file mode 100644 index 0000000000..61d34a958c --- /dev/null +++ b/deepmd/pd/cxx_op.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + + +def load_library(module_name: str) -> bool: + """Load OP library. 
+ + Parameters + ---------- + module_name : str + Name of the module + + Returns + ------- + bool + Whether the library is loaded successfully + """ + # NOTE: Paddle do not support loading library from .so file yet. + return False + + +ENABLE_CUSTOMIZED_OP = load_library("deepmd_op_pd") + +__all__ = [ + "ENABLE_CUSTOMIZED_OP", +] diff --git a/deepmd/pd/entrypoints/__init__.py b/deepmd/pd/entrypoints/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/entrypoints/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py new file mode 100644 index 0000000000..e8b6a0d0c7 --- /dev/null +++ b/deepmd/pd/entrypoints/main.py @@ -0,0 +1,586 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import argparse +import copy +import json +import logging +from pathlib import ( + Path, +) +from typing import ( + Optional, + Union, +) + +import h5py +import paddle +import paddle.distributed as dist +import paddle.distributed.fleet as fleet +import paddle.version + +from deepmd import ( + __version__, +) +from deepmd.common import ( + expand_sys_str, +) +from deepmd.env import ( + GLOBAL_CONFIG, +) +from deepmd.loggers.loggers import ( + set_log_handles, +) +from deepmd.main import ( + parse_args, +) +from deepmd.pd.cxx_op import ( + ENABLE_CUSTOMIZED_OP, +) +from deepmd.pd.infer import ( + inference, +) +from deepmd.pd.model.model import ( + BaseModel, +) +from deepmd.pd.train import ( + training, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.env import ( + DEVICE, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) +from deepmd.pd.utils.multi_task import ( + preprocess_shared_params, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) +from deepmd.utils.argcheck import ( + 
normalize, +) +from deepmd.utils.compat import ( + update_deepmd_input, +) +from deepmd.utils.data_system import ( + get_data, + process_systems, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.summary import SummaryPrinter as BaseSummaryPrinter + +# from paddle.distributed.elastic.multiprocessing.errors import ( +# record, +# ) + + +log = logging.getLogger(__name__) + + +def get_trainer( + config, + init_model=None, + restart_model=None, + finetune_model=None, + force_load=False, + init_frz_model=None, + shared_links=None, + finetune_links=None, +): + multi_task = "model_dict" in config.get("model", {}) + + # Initialize DDP + world_size = dist.get_world_size() + if world_size > 1: + assert paddle.version.nccl() != "0" + fleet.init(is_collective=True) + + def prepare_trainer_input_single( + model_params_single, data_dict_single, rank=0, seed=None + ): + training_dataset_params = data_dict_single["training_data"] + validation_dataset_params = data_dict_single.get("validation_data", None) + validation_systems = ( + validation_dataset_params["systems"] if validation_dataset_params else None + ) + training_systems = training_dataset_params["systems"] + training_systems = process_systems(training_systems) + if validation_systems is not None: + validation_systems = process_systems(validation_systems) + + # stat files + stat_file_path_single = data_dict_single.get("stat_file", None) + if rank != 0: + stat_file_path_single = None + elif stat_file_path_single is not None: + if not Path(stat_file_path_single).exists(): + if stat_file_path_single.endswith((".h5", ".hdf5")): + with h5py.File(stat_file_path_single, "w") as f: + pass + else: + Path(stat_file_path_single).mkdir() + stat_file_path_single = DPPath(stat_file_path_single, "a") + + # validation and training data + # avoid the same batch sequence among devices + rank_seed = (seed + rank) % (2**32) if seed is not None else None + validation_data_single = ( + DpLoaderSet( + validation_systems, + 
validation_dataset_params["batch_size"], + model_params_single["type_map"], + seed=rank_seed, + ) + if validation_systems + else None + ) + train_data_single = DpLoaderSet( + training_systems, + training_dataset_params["batch_size"], + model_params_single["type_map"], + seed=rank_seed, + ) + return ( + train_data_single, + validation_data_single, + stat_file_path_single, + ) + + rank = dist.get_rank() if dist.is_available() and dist.is_initialized() else 0 + data_seed = config["training"].get("seed", None) + if not multi_task: + ( + train_data, + validation_data, + stat_file_path, + ) = prepare_trainer_input_single( + config["model"], + config["training"], + rank=rank, + seed=data_seed, + ) + else: + train_data, validation_data, stat_file_path = {}, {}, {} + for model_key in config["model"]["model_dict"]: + ( + train_data[model_key], + validation_data[model_key], + stat_file_path[model_key], + ) = prepare_trainer_input_single( + config["model"]["model_dict"][model_key], + config["training"]["data_dict"][model_key], + rank=rank, + seed=data_seed, + ) + + trainer = training.Trainer( + config, + train_data, + stat_file_path=stat_file_path, + validation_data=validation_data, + init_model=init_model, + restart_model=restart_model, + finetune_model=finetune_model, + force_load=force_load, + shared_links=shared_links, + finetune_links=finetune_links, + init_frz_model=init_frz_model, + ) + return trainer + + +class SummaryPrinter(BaseSummaryPrinter): + """Summary printer for Paddle.""" + + def is_built_with_cuda(self) -> bool: + """Check if the backend is built with CUDA.""" + return paddle.device.is_compiled_with_cuda() + + def is_built_with_rocm(self) -> bool: + """Check if the backend is built with ROCm.""" + return paddle.device.is_compiled_with_rocm() + + def get_compute_device(self) -> str: + """Get Compute device.""" + return str(DEVICE) + + def get_ngpus(self) -> int: + """Get the number of GPUs.""" + return paddle.device.cuda.device_count() + + def 
get_backend_info(self) -> dict: + """Get backend information.""" + if ENABLE_CUSTOMIZED_OP: + op_info = { + "build with PD ver": GLOBAL_CONFIG["pd_version"], + "build with PD inc": GLOBAL_CONFIG["pd_include_dir"].replace(";", "\n"), + "build with PD lib": GLOBAL_CONFIG["pd_libs"].replace(";", "\n"), + } + else: + op_info = {} + return { + "Backend": "Paddle", + "PD ver": f"v{paddle.__version__}-g{paddle.version.commit[:11]}", + "Enable custom OP": ENABLE_CUSTOMIZED_OP, + **op_info, + } + + +def train(FLAGS): + log.info("Configuration path: %s", FLAGS.INPUT) + SummaryPrinter()() + with open(FLAGS.INPUT) as fin: + config = json.load(fin) + # ensure suffix, as in the command line help, we say "path prefix of checkpoint files" + if FLAGS.init_model is not None and not FLAGS.init_model.endswith(".pd"): + FLAGS.init_model += ".pd" + if FLAGS.restart is not None and not FLAGS.restart.endswith(".pd"): + FLAGS.restart += ".pd" + + # update multitask config + multi_task = "model_dict" in config["model"] + shared_links = None + if multi_task: + config["model"], shared_links = preprocess_shared_params(config["model"]) + # handle the special key + assert ( + "RANDOM" not in config["model"]["model_dict"] + ), "Model name can not be 'RANDOM' in multi-task mode!" 
+ + # update fine-tuning config + finetune_links = None + if FLAGS.finetune is not None: + config["model"], finetune_links = get_finetune_rules( + FLAGS.finetune, + config["model"], + model_branch=FLAGS.model_branch, + change_model_params=FLAGS.use_pretrain_script, + ) + # update init_model or init_frz_model config if necessary + if ( + FLAGS.init_model is not None or FLAGS.init_frz_model is not None + ) and FLAGS.use_pretrain_script: + if FLAGS.init_model is not None: + init_state_dict = paddle.load(FLAGS.init_model) + if "model" in init_state_dict: + init_state_dict = init_state_dict["model"] + config["model"] = init_state_dict["_extra_state"]["model_params"] + else: + raise NotImplementedError("FLAGS.init_model can not be empty.") + + # argcheck + config = update_deepmd_input(config, warning=True, dump="input_v2_compat.json") + config = normalize(config, multi_task=multi_task) + + # do neighbor stat + min_nbor_dist = None + if not FLAGS.skip_neighbor_stat: + log.info( + "Calculate neighbor statistics... 
(add --skip-neighbor-stat to skip this step)" + ) + + if not multi_task: + type_map = config["model"].get("type_map") + train_data = get_data( + config["training"]["training_data"], 0, type_map, None + ) + config["model"], min_nbor_dist = BaseModel.update_sel( + train_data, type_map, config["model"] + ) + else: + min_nbor_dist = {} + for model_item in config["model"]["model_dict"]: + type_map = config["model"]["model_dict"][model_item].get("type_map") + train_data = get_data( + config["training"]["data_dict"][model_item]["training_data"], + 0, + type_map, + None, + ) + config["model"]["model_dict"][model_item], min_nbor_dist[model_item] = ( + BaseModel.update_sel( + train_data, type_map, config["model"]["model_dict"][model_item] + ) + ) + + with open(FLAGS.output, "w") as fp: + json.dump(config, fp, indent=4) + + trainer = get_trainer( + config, + FLAGS.init_model, + FLAGS.restart, + FLAGS.finetune, + FLAGS.force_load, + FLAGS.init_frz_model, + shared_links=shared_links, + finetune_links=finetune_links, + ) + # save min_nbor_dist + if min_nbor_dist is not None: + if not multi_task: + trainer.model.min_nbor_dist = min_nbor_dist + else: + for model_item in min_nbor_dist: + trainer.model[model_item].min_nbor_dist = min_nbor_dist[model_item] + trainer.run() + + +def freeze(FLAGS): + paddle.set_flags( + { + "FLAGS_save_cf_stack_op": 1, + "FLAGS_prim_enable_dynamic": 1, + "FLAGS_enable_pir_api": 1, + } + ) + model = inference.Tester(FLAGS.model, head=FLAGS.head).model + model.eval() + from paddle.static import ( + InputSpec, + ) + + """ + ** coord [None, natoms, 3] paddle.float64 + ** atype [None, natoms] paddle.int64 + ** nlist [None, natoms, nnei] paddle.int32 + """ + # NOTE: 'FLAGS_save_cf_stack_op', 'FLAGS_prim_enable_dynamic' and + # 'FLAGS_enable_pir_api' shoule be enabled when freezing model. 
+ jit_model = paddle.jit.to_static( + model.forward_lower, + full_graph=True, + input_spec=[ + InputSpec([-1, -1, 3], dtype="float64", name="coord"), + InputSpec([-1, -1], dtype="int32", name="atype"), + InputSpec([-1, -1, -1], dtype="int32", name="nlist"), + ], + ) + if FLAGS.output.endswith(".json"): + FLAGS.output = FLAGS.output[:-5] + paddle.jit.save( + jit_model, + path=FLAGS.output, + skip_prune_program=True, + ) + log.info( + f"Paddle inference model has been exported to: {FLAGS.output}.json and {FLAGS.output}.pdiparams" + ) + + +def show(FLAGS): + if FLAGS.INPUT.split(".")[-1] == "pd": + state_dict = paddle.load(FLAGS.INPUT) + if "model" in state_dict: + state_dict = state_dict["model"] + model_params = state_dict["_extra_state"]["model_params"] + else: + raise RuntimeError( + "The model provided must be a checkpoint file with a .pd extension" + ) + model_is_multi_task = "model_dict" in model_params + log.info("This is a multitask model") if model_is_multi_task else log.info( + "This is a singletask model" + ) + + if "model-branch" in FLAGS.ATTRIBUTES: + # The model must be multitask mode + if not model_is_multi_task: + raise RuntimeError( + "The 'model-branch' option requires a multitask model." + " The provided model does not meet this criterion." + ) + model_branches = list(model_params["model_dict"].keys()) + model_branches += ["RANDOM"] + log.info( + f"Available model branches are {model_branches}, " + f"where 'RANDOM' means using a randomly initialized fitting net." 
+ ) + if "type-map" in FLAGS.ATTRIBUTES: + if model_is_multi_task: + model_branches = list(model_params["model_dict"].keys()) + for branch in model_branches: + type_map = model_params["model_dict"][branch]["type_map"] + log.info(f"The type_map of branch {branch} is {type_map}") + else: + type_map = model_params["type_map"] + log.info(f"The type_map is {type_map}") + if "descriptor" in FLAGS.ATTRIBUTES: + if model_is_multi_task: + model_branches = list(model_params["model_dict"].keys()) + for branch in model_branches: + descriptor = model_params["model_dict"][branch]["descriptor"] + log.info(f"The descriptor parameter of branch {branch} is {descriptor}") + else: + descriptor = model_params["descriptor"] + log.info(f"The descriptor parameter is {descriptor}") + if "fitting-net" in FLAGS.ATTRIBUTES: + if model_is_multi_task: + model_branches = list(model_params["model_dict"].keys()) + for branch in model_branches: + fitting_net = model_params["model_dict"][branch]["fitting_net"] + log.info( + f"The fitting_net parameter of branch {branch} is {fitting_net}" + ) + else: + fitting_net = model_params["fitting_net"] + log.info(f"The fitting_net parameter is {fitting_net}") + + +def change_bias(FLAGS): + if FLAGS.INPUT.endswith(".pd"): + old_state_dict = paddle.load(FLAGS.INPUT) + model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) + model_params = model_state_dict["_extra_state"]["model_params"] + else: + raise RuntimeError( + "Paddle now do not support change bias directly from a freezed model file" + "Please provided a checkpoint file with a .pd extension" + ) + multi_task = "model_dict" in model_params + model_branch = FLAGS.model_branch + bias_adjust_mode = ( + "change-by-statistic" if FLAGS.mode == "change" else "set-by-statistic" + ) + if multi_task: + assert ( + model_branch is not None + ), "For multitask model, the model branch must be set!" 
+ assert model_branch in model_params["model_dict"], ( + f"For multitask model, the model branch must be in the 'model_dict'! " + f"Available options are : {list(model_params['model_dict'].keys())}." + ) + log.info(f"Changing out bias for model {model_branch}.") + model = training.get_model_for_wrapper(model_params) + type_map = ( + model_params["type_map"] + if not multi_task + else model_params["model_dict"][model_branch]["type_map"] + ) + model_to_change = model if not multi_task else model[model_branch] + if FLAGS.INPUT.endswith(".pd"): + wrapper = ModelWrapper(model) + wrapper.set_state_dict(old_state_dict["model"]) + else: + raise NotImplementedError("Only support .pd file") + + if FLAGS.bias_value is not None: + # use user-defined bias + assert model_to_change.model_type in [ + "ener" + ], "User-defined bias is only available for energy model!" + assert ( + len(FLAGS.bias_value) == len(type_map) + ), f"The number of elements in the bias should be the same as that in the type_map: {type_map}." + old_bias = model_to_change.get_out_bias() + bias_to_set = paddle.to_tensor( + FLAGS.bias_value, dtype=old_bias.dtype, place=old_bias.place + ).reshape(old_bias.shape) + model_to_change.set_out_bias(bias_to_set) + log.info( + f"Change output bias of {type_map!s} " + f"from {to_numpy_array(old_bias).reshape(-1)!s} " + f"to {to_numpy_array(bias_to_set).reshape(-1)!s}." 
# @record
def main(args: Optional[Union[list[str], argparse.Namespace]] = None):
    """CLI entry point: parse arguments, set up logging, and dispatch.

    Parameters
    ----------
    args : list[str] or argparse.Namespace, optional
        Raw command-line arguments to parse, or an already-parsed
        namespace; ``None`` means "read from ``sys.argv``".

    Raises
    ------
    RuntimeError
        If the command is not one of train/freeze/show/change-bias.
    """
    flags = args if isinstance(args, argparse.Namespace) else parse_args(args=args)

    log_path = None if flags.log_path is None else Path(flags.log_path)
    set_log_handles(flags.log_level, log_path, mpi_log=None)
    log.debug("Log handles were successfully set")

    log.info("DeePMD version: %s", __version__)

    command = flags.command
    if command == "train":
        train(flags)
    elif command == "freeze":
        ckpt_folder = Path(flags.checkpoint_folder)
        if ckpt_folder.is_dir():
            # A directory: resolve the latest checkpoint recorded in
            # its "checkpoint" file.
            latest_ckpt_file = (ckpt_folder / "checkpoint").read_text()
            flags.model = str(ckpt_folder / latest_ckpt_file)
        else:
            flags.model = flags.checkpoint_folder
        flags.output = str(Path(flags.output).with_suffix(""))
        freeze(flags)
    elif command == "show":
        show(flags)
    elif command == "change-bias":
        change_bias(flags)
    else:
        raise RuntimeError(f"Invalid command {flags.command}!")


if __name__ == "__main__":
    main()
+from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +if TYPE_CHECKING: + import ase.neighborlist + + +class DeepEval(DeepEvalBackend): + """Paddle backend implementation of DeepEval. + + Parameters + ---------- + model_file : Path + The name of the frozen model file. + output_def : ModelOutputDef + The output definition of the model. + *args : list + Positional arguments. + auto_batch_size : bool or int or AutomaticBatchSize, default: False + If True, automatic batch size will be used. If int, it will be used + as the initial batch size. + neighbor_list : ase.neighborlist.NewPrimitiveNeighborList, optional + The ASE neighbor list class to produce the neighbor list. If None, the + neighbor list will be built natively in the model. + **kwargs : dict + Keyword arguments. + """ + + def __init__( + self, + model_file: str, + output_def: ModelOutputDef, + *args: Any, + auto_batch_size: Union[bool, int, AutoBatchSize] = True, + neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, + head: Optional[Union[str, int]] = None, + **kwargs: Any, + ): + enable_prim(True) + self.output_def = output_def + self.model_path = model_file + if str(self.model_path).endswith(".pd"): + state_dict = paddle.load(model_file) + if "model" in state_dict: + state_dict = state_dict["model"] + self.input_param = state_dict["_extra_state"]["model_params"] + self.model_def_script = self.input_param + self.multi_task = "model_dict" in self.input_param + if self.multi_task: + model_keys = list(self.input_param["model_dict"].keys()) + if isinstance(head, int): + head = model_keys[0] + assert ( + head is not None + ), f"Head must be set for multitask model! Available heads are: {model_keys}" + assert ( + head in model_keys + ), f"No head named {head} in model! 
Available heads are: {model_keys}" + self.input_param = self.input_param["model_dict"][head] + state_dict_head = {"_extra_state": state_dict["_extra_state"]} + for item in state_dict: + if f"model.{head}." in item: + state_dict_head[ + item.replace(f"model.{head}.", "model.Default.") + ] = state_dict[item].clone() + state_dict = state_dict_head + model = get_model(self.input_param).to(DEVICE) + # model = paddle.jit.to_static(model) + self.dp = ModelWrapper(model) + self.dp.set_state_dict(state_dict) + else: + # self.dp = paddle.jit.load(self.model_path.split(".json")[0]) + raise ValueError(f"Unknown model file format: {self.model_path}!") + self.rcut = self.dp.model["Default"].get_rcut() + self.type_map = self.dp.model["Default"].get_type_map() + if isinstance(auto_batch_size, bool): + if auto_batch_size: + self.auto_batch_size = AutoBatchSize() + else: + self.auto_batch_size = None + elif isinstance(auto_batch_size, int): + self.auto_batch_size = AutoBatchSize(auto_batch_size) + elif isinstance(auto_batch_size, AutoBatchSize): + self.auto_batch_size = auto_batch_size + else: + raise TypeError("auto_batch_size should be bool, int, or AutoBatchSize") + self._has_spin = getattr(self.dp.model["Default"], "has_spin", False) + if callable(self._has_spin): + self._has_spin = self._has_spin() + + def get_rcut(self) -> float: + """Get the cutoff radius of this model.""" + return self.rcut + + def get_ntypes(self) -> int: + """Get the number of atom types of this model.""" + return len(self.type_map) + + def get_type_map(self) -> list[str]: + """Get the type map (element name of the atom types) of this model.""" + return self.type_map + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this DP.""" + return self.dp.model["Default"].get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this DP.""" + return self.dp.model["Default"].get_dim_aparam() + + def get_intensive(self) -> 
bool: + return self.dp.model["Default"].get_intensive() + + @property + def model_type(self) -> type["DeepEvalWrapper"]: + """The the evaluator of the model type.""" + model_output_type = self.dp.model["Default"].model_output_type() + if "energy" in model_output_type: + return DeepPot + else: + raise RuntimeError("Unknown model type") + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.dp.model["Default"].get_sel_type() + + def get_numb_dos(self) -> int: + """Get the number of DOS.""" + return self.dp.model["Default"].get_numb_dos() + + def get_task_dim(self) -> int: + """Get the output dimension.""" + return self.dp.model["Default"].get_task_dim() + + def get_has_efield(self): + """Check if the model has efield.""" + return False + + def get_ntypes_spin(self): + """Get the number of spin atom types of this model. Only used in old implement.""" + return 0 + + def get_has_spin(self): + """Check if the model has spin atom types.""" + return self._has_spin + + def eval( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + atomic: bool = False, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> dict[str, np.ndarray]: + """Evaluate the energy, force and virial by using this DP. + + Parameters + ---------- + coords + The coordinates of atoms. + The array should be of size nframes x natoms x 3 + cells + The cell of the region. + If None then non-PBC is assumed, otherwise using PBC. + The array should be of size nframes x 9 + atom_types + The atom types + The list should contain natoms ints + atomic + Calculate the atomic energy and virial + fparam + The frame parameter. + The array can be of size : + - nframes x dim_fparam. + - dim_fparam. 
Then all frames are assumed to be provided with the same fparam. + aparam + The atomic parameter + The array can be of size : + - nframes x natoms x dim_aparam. + - natoms x dim_aparam. Then all frames are assumed to be provided with the same aparam. + - dim_aparam. Then all frames and atoms are provided with the same aparam. + **kwargs + Other parameters + + Returns + ------- + output_dict : dict + The output of the evaluation. The keys are the names of the output + variables, and the values are the corresponding output arrays. + """ + # convert all of the input to numpy array + atom_types = np.array(atom_types, dtype=np.int32) + coords = np.array(coords) + if cells is not None: + cells = np.array(cells) + natoms, numb_test = self._get_natoms_and_nframes( + coords, atom_types, len(atom_types.shape) > 1 + ) + request_defs = self._get_request_defs(atomic) + if "spin" not in kwargs or kwargs["spin"] is None: + out = self._eval_func(self._eval_model, numb_test, natoms)( + coords, cells, atom_types, fparam, aparam, request_defs + ) + else: + out = self._eval_func(self._eval_model_spin, numb_test, natoms)( + coords, + cells, + atom_types, + np.array(kwargs["spin"]), + fparam, + aparam, + request_defs, + ) + return dict( + zip( + [x.name for x in request_defs], + out, + ) + ) + + def _get_request_defs(self, atomic: bool) -> list[OutputVariableDef]: + """Get the requested output definitions. + + When atomic is True, all output_def are requested. + When atomic is False, only energy (tensor), force, and virial + are requested. + + Parameters + ---------- + atomic : bool + Whether to request the atomic output. + + Returns + ------- + list[OutputVariableDef] + The requested output definitions. 
+ """ + if atomic: + return list(self.output_def.var_defs.values()) + else: + return [ + x + for x in self.output_def.var_defs.values() + if x.category + in ( + OutputVariableCategory.OUT, + OutputVariableCategory.REDU, + OutputVariableCategory.DERV_R, + OutputVariableCategory.DERV_C_REDU, + ) + ] + + def _eval_func(self, inner_func: Callable, numb_test: int, natoms: int) -> Callable: + """Wrapper method with auto batch size. + + Parameters + ---------- + inner_func : Callable + the method to be wrapped + numb_test : int + number of tests + natoms : int + number of atoms + + Returns + ------- + Callable + the wrapper + """ + if self.auto_batch_size is not None: + + def eval_func(*args, **kwargs): + return self.auto_batch_size.execute_all( + inner_func, numb_test, natoms, *args, **kwargs + ) + + else: + eval_func = inner_func + return eval_func + + def _get_natoms_and_nframes( + self, + coords: np.ndarray, + atom_types: np.ndarray, + mixed_type: bool = False, + ) -> tuple[int, int]: + if mixed_type: + natoms = len(atom_types[0]) + else: + natoms = len(atom_types) + if natoms == 0: + assert coords.size == 0 + else: + coords = np.reshape(np.array(coords), [-1, natoms * 3]) + nframes = coords.shape[0] + return natoms, nframes + + def _eval_model( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + request_defs: list[OutputVariableDef], + ): + model = self.dp.to(DEVICE) + prec = NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PD_FLOAT_PRECISION]] + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + atom_types = np.tile(atom_types, nframes).reshape([nframes, -1]) + else: + natoms = len(atom_types[0]) + + coord_input = paddle.to_tensor( + coords.reshape([nframes, natoms, 3]).astype(prec), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + type_input = paddle.to_tensor( + 
atom_types.astype(NP_PRECISION_DICT[RESERVED_PRECISON_DICT[paddle.int64]]), + dtype=paddle.int64, + place=DEVICE, + ) + if cells is not None: + box_input = paddle.to_tensor( + cells.reshape([nframes, 3, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + else: + box_input = None + if fparam is not None: + fparam_input = to_paddle_tensor( + fparam.reshape([nframes, self.get_dim_fparam()]) + ) + else: + fparam_input = None + if aparam is not None: + aparam_input = to_paddle_tensor( + aparam.reshape([nframes, natoms, self.get_dim_aparam()]) + ) + else: + aparam_input = None + do_atomic_virial = any( + x.category == OutputVariableCategory.DERV_C for x in request_defs + ) + batch_output = model( + coord_input, + type_input, + box=box_input, + do_atomic_virial=do_atomic_virial, + fparam=fparam_input, + aparam=aparam_input, + ) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + + results = [] + for odef in request_defs: + pd_name = self._OUTDEF_DP2BACKEND[odef.name] + if pd_name in batch_output: + shape = self._get_output_shape(odef, nframes, natoms) + out = batch_output[pd_name].reshape(shape).numpy() + results.append(out) + else: + shape = self._get_output_shape(odef, nframes, natoms) + results.append( + np.full(np.abs(shape), np.nan, dtype=prec) + ) # this is kinda hacky + return tuple(results) + + def _eval_model_spin( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + spins: np.ndarray, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + request_defs: list[OutputVariableDef], + ): + model = self.dp.to(DEVICE) + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + atom_types = np.tile(atom_types, nframes).reshape([nframes, -1]) + else: + natoms = len(atom_types[0]) + + coord_input = paddle.to_tensor( + coords.reshape([nframes, natoms, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + type_input = paddle.to_tensor(atom_types, 
dtype=paddle.int64, place=DEVICE) + spin_input = paddle.to_tensor( + spins.reshape([nframes, natoms, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + if cells is not None: + box_input = paddle.to_tensor( + cells.reshape([nframes, 3, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + else: + box_input = None + if fparam is not None: + fparam_input = to_paddle_tensor( + fparam.reshape([nframes, self.get_dim_fparam()]) + ) + else: + fparam_input = None + if aparam is not None: + aparam_input = to_paddle_tensor( + aparam.reshape([nframes, natoms, self.get_dim_aparam()]) + ) + else: + aparam_input = None + + do_atomic_virial = any( + x.category == OutputVariableCategory.DERV_C_REDU for x in request_defs + ) + batch_output = model( + coord_input, + type_input, + spin=spin_input, + box=box_input, + do_atomic_virial=do_atomic_virial, + fparam=fparam_input, + aparam=aparam_input, + ) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + + results = [] + for odef in request_defs: + pd_name = self._OUTDEF_DP2BACKEND[odef.name] + if pd_name in batch_output: + shape = self._get_output_shape(odef, nframes, natoms) + out = batch_output[pd_name].reshape(shape).numpy() + results.append(out) + else: + shape = self._get_output_shape(odef, nframes, natoms) + results.append( + np.full( + np.abs(shape), + np.nan, + dtype=NP_PRECISION_DICT[ + RESERVED_PRECISON_DICT[GLOBAL_PD_FLOAT_PRECISION] + ], + ) + ) # this is kinda hacky + return tuple(results) + + def _get_output_shape(self, odef, nframes, natoms): + if odef.category == OutputVariableCategory.DERV_C_REDU: + # virial + return [nframes, *odef.shape[:-1], 9] + elif odef.category == OutputVariableCategory.REDU: + # energy + return [nframes, *odef.shape, 1] + elif odef.category == OutputVariableCategory.DERV_C: + # atom_virial + return [nframes, *odef.shape[:-1], natoms, 9] + elif odef.category == OutputVariableCategory.DERV_R: + # force + return [nframes, *odef.shape[:-1], natoms, 3] + elif 
odef.category == OutputVariableCategory.OUT: + # atom_energy, atom_tensor + # Something wrong here? + # return [nframes, *shape, natoms, 1] + return [nframes, natoms, *odef.shape, 1] + else: + raise RuntimeError("unknown category") + + def eval_typeebd(self) -> np.ndarray: + """Evaluate output of type embedding network by using this model. + + Returns + ------- + np.ndarray + The output of type embedding network. The shape is [ntypes, o_size] or [ntypes + 1, o_size], + where ntypes is the number of types, and o_size is the number of nodes + in the output layer. If there are multiple type embedding networks, + these outputs will be concatenated along the second axis. + + Raises + ------ + KeyError + If the model does not enable type embedding. + + See Also + -------- + deepmd.pd.model.network.network.TypeEmbedNetConsistent : + The type embedding network. + """ + out = [] + for mm in self.dp.model["Default"].modules(): + if mm.original_name == TypeEmbedNetConsistent.__name__: + out.append(mm(DEVICE)) + if not out: + raise KeyError("The model has no type embedding networks.") + typeebd = paddle.concat(out, axis=1) + return to_numpy_array(typeebd) + + def get_model_def_script(self) -> str: + """Get model definition script.""" + return self.model_def_script + + def eval_descriptor( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> np.ndarray: + """Evaluate descriptors by using this DP. + + Parameters + ---------- + coords + The coordinates of atoms. + The array should be of size nframes x natoms x 3 + cells + The cell of the region. + If None then non-PBC is assumed, otherwise using PBC. + The array should be of size nframes x 9 + atom_types + The atom types + The list should contain natoms ints + fparam + The frame parameter. + The array can be of size : + - nframes x dim_fparam. + - dim_fparam. 
Then all frames are assumed to be provided with the same fparam. + aparam + The atomic parameter + The array can be of size : + - nframes x natoms x dim_aparam. + - natoms x dim_aparam. Then all frames are assumed to be provided with the same aparam. + - dim_aparam. Then all frames and atoms are provided with the same aparam. + + Returns + ------- + descriptor + Descriptors. + """ + model = self.dp.model["Default"] + model.set_eval_descriptor_hook(True) + self.eval( + coords, + cells, + atom_types, + atomic=False, + fparam=fparam, + aparam=aparam, + **kwargs, + ) + descriptor = model.eval_descriptor() + model.set_eval_descriptor_hook(False) + return to_numpy_array(descriptor) diff --git a/deepmd/pd/infer/inference.py b/deepmd/pd/infer/inference.py new file mode 100644 index 0000000000..1ebadd24c9 --- /dev/null +++ b/deepmd/pd/infer/inference.py @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from copy import ( + deepcopy, +) + +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.env import ( + DEVICE, + JIT, +) + +# if paddle.__version__.startswith("2"): +# import paddle._dynamo +log = logging.getLogger(__name__) + + +class Tester: + def __init__( + self, + model_ckpt, + head=None, + ): + """Construct a DeePMD tester. + + Args: + - config: The Dict-like configuration with training options. + """ + # Model + state_dict = paddle.load(model_ckpt) + if "model" in state_dict: + state_dict = state_dict["model"] + model_params = state_dict["_extra_state"]["model_params"] + self.multi_task = "model_dict" in model_params + if self.multi_task: + assert head is not None, "Head must be specified in multitask mode!" + self.head = head + assert head in model_params["model_dict"], ( + f"Specified head {head} not found in model {model_ckpt}! " + f"Available ones are {list(model_params['model_dict'].keys())}." 
+ ) + model_params = model_params["model_dict"][head] + state_dict_head = {"_extra_state": state_dict["_extra_state"]} + for item in state_dict: + if f"model.{head}." in item: + state_dict_head[ + item.replace(f"model.{head}.", "model.Default.") + ] = state_dict[item].clone() + state_dict = state_dict_head + + self.model_params = deepcopy(model_params) + self.model = get_model(model_params).to(DEVICE) + + # Model Wrapper + self.wrapper = ModelWrapper(self.model) # inference only + if JIT: + raise NotImplementedError + # self.wrapper = paddle.jit.to_static(self.wrapper) + self.wrapper.set_state_dict(state_dict) diff --git a/deepmd/pd/loss/__init__.py b/deepmd/pd/loss/__init__.py new file mode 100644 index 0000000000..0e978b95c2 --- /dev/null +++ b/deepmd/pd/loss/__init__.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .ener import ( + EnergyStdLoss, +) +from .loss import ( + TaskLoss, +) + +__all__ = [ + "EnergyStdLoss", + "TaskLoss", +] diff --git a/deepmd/pd/loss/ener.py b/deepmd/pd/loss/ener.py new file mode 100644 index 0000000000..7c5d848b45 --- /dev/null +++ b/deepmd/pd/loss/ener.py @@ -0,0 +1,428 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle +import paddle.nn.functional as F + +from deepmd.pd.loss.loss import ( + TaskLoss, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + GLOBAL_PD_FLOAT_PRECISION, +) +from deepmd.utils.data import ( + DataRequirementItem, +) + + +class EnergyStdLoss(TaskLoss): + def __init__( + self, + starter_learning_rate=1.0, + start_pref_e=0.0, + limit_pref_e=0.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=0.0, + limit_pref_v=0.0, + start_pref_ae: float = 0.0, + limit_pref_ae: float = 0.0, + start_pref_pf: float = 0.0, + limit_pref_pf: float = 0.0, + relative_f: Optional[float] = None, + enable_atom_ener_coeff: bool = False, + start_pref_gf: float = 0.0, + limit_pref_gf: float = 0.0, + 
numb_generalized_coord: int = 0, + use_l1_all: bool = False, + inference=False, + **kwargs, + ): + r"""Construct a layer to compute loss on energy, force and virial. + + Parameters + ---------- + starter_learning_rate : float + The learning rate at the start of the training. + start_pref_e : float + The prefactor of energy loss at the start of the training. + limit_pref_e : float + The prefactor of energy loss at the end of the training. + start_pref_f : float + The prefactor of force loss at the start of the training. + limit_pref_f : float + The prefactor of force loss at the end of the training. + start_pref_v : float + The prefactor of virial loss at the start of the training. + limit_pref_v : float + The prefactor of virial loss at the end of the training. + start_pref_ae : float + The prefactor of atomic energy loss at the start of the training. + limit_pref_ae : float + The prefactor of atomic energy loss at the end of the training. + start_pref_pf : float + The prefactor of atomic prefactor force loss at the start of the training. + limit_pref_pf : float + The prefactor of atomic prefactor force loss at the end of the training. + relative_f : float + If provided, relative force error will be used in the loss. The difference + of force will be normalized by the magnitude of the force in the label with + a shift given by relative_f + enable_atom_ener_coeff : bool + if true, the energy will be computed as \sum_i c_i E_i + start_pref_gf : float + The prefactor of generalized force loss at the start of the training. + limit_pref_gf : float + The prefactor of generalized force loss at the end of the training. + numb_generalized_coord : int + The dimension of generalized coordinates. + use_l1_all : bool + Whether to use L1 loss, if False (default), it will use L2 loss. + inference : bool + If true, it will output all losses found in output, ignoring the pre-factors. + **kwargs + Other keyword arguments. 
+ """ + super().__init__() + self.starter_learning_rate = starter_learning_rate + self.has_e = (start_pref_e != 0.0 and limit_pref_e != 0.0) or inference + self.has_f = (start_pref_f != 0.0 and limit_pref_f != 0.0) or inference + self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference + self.has_ae = (start_pref_ae != 0.0 and limit_pref_ae != 0.0) or inference + self.has_pf = (start_pref_pf != 0.0 and limit_pref_pf != 0.0) or inference + self.has_gf = start_pref_gf != 0.0 and limit_pref_gf != 0.0 + + self.start_pref_e = start_pref_e + self.limit_pref_e = limit_pref_e + self.start_pref_f = start_pref_f + self.limit_pref_f = limit_pref_f + self.start_pref_v = start_pref_v + self.limit_pref_v = limit_pref_v + self.start_pref_ae = start_pref_ae + self.limit_pref_ae = limit_pref_ae + self.start_pref_pf = start_pref_pf + self.limit_pref_pf = limit_pref_pf + self.start_pref_gf = start_pref_gf + self.limit_pref_gf = limit_pref_gf + self.relative_f = relative_f + self.enable_atom_ener_coeff = enable_atom_ener_coeff + self.numb_generalized_coord = numb_generalized_coord + if self.has_gf and self.numb_generalized_coord < 1: + raise RuntimeError( + "When generalized force loss is used, the dimension of generalized coordinates should be larger than 0" + ) + self.use_l1_all = use_l1_all + self.inference = inference + + def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): + """Return loss on energy and force. + + Parameters + ---------- + input_dict : dict[str, paddle.Tensor] + Model inputs. + model : paddle.nn.Layer + Model to be used to output the predictions. + label : dict[str, paddle.Tensor] + Labels. + natoms : int + The local atom number. + + Returns + ------- + model_pred: dict[str, paddle.Tensor] + Model predictions. + loss: paddle.Tensor + Loss for model to minimize. + more_loss: dict[str, paddle.Tensor] + Other losses for display. 
+ """ + model_pred = model(**input_dict) + coef = learning_rate / self.starter_learning_rate + pref_e = self.limit_pref_e + (self.start_pref_e - self.limit_pref_e) * coef + pref_f = self.limit_pref_f + (self.start_pref_f - self.limit_pref_f) * coef + pref_v = self.limit_pref_v + (self.start_pref_v - self.limit_pref_v) * coef + pref_ae = self.limit_pref_ae + (self.start_pref_ae - self.limit_pref_ae) * coef + pref_pf = self.limit_pref_pf + (self.start_pref_pf - self.limit_pref_pf) * coef + pref_gf = self.limit_pref_gf + (self.start_pref_gf - self.limit_pref_gf) * coef + + loss = paddle.zeros([1], dtype=env.GLOBAL_PD_FLOAT_PRECISION).to(env.DEVICE)[0] + more_loss = {} + # more_loss['log_keys'] = [] # showed when validation on the fly + # more_loss['test_keys'] = [] # showed when doing dp test + atom_norm = 1.0 / natoms + if self.has_e and "energy" in model_pred and "energy" in label: + energy_pred = model_pred["energy"] + energy_label = label["energy"] + if self.enable_atom_ener_coeff and "atom_energy" in model_pred: + atom_ener_pred = model_pred["atom_energy"] + # when ener_coeff (\nu) is defined, the energy is defined as + # E = \sum_i \nu_i E_i + # instead of the sum of atomic energies. 
+ # + # A case is that we want to train reaction energy + # A + B -> C + D + # E = - E(A) - E(B) + E(C) + E(D) + # A, B, C, D could be put far away from each other + atom_ener_coeff = label["atom_ener_coeff"] + atom_ener_coeff = atom_ener_coeff.reshape(atom_ener_pred.shape) + energy_pred = paddle.sum(atom_ener_coeff * atom_ener_pred, axis=1) + find_energy = label.get("find_energy", 0.0) + pref_e = pref_e * find_energy + if not self.use_l1_all: + l2_ener_loss = paddle.mean(paddle.square(energy_pred - energy_label)) + if not self.inference: + more_loss["l2_ener_loss"] = self.display_if_exist( + l2_ener_loss.detach(), find_energy + ) + loss += atom_norm * (pref_e * l2_ener_loss) + rmse_e = l2_ener_loss.sqrt() * atom_norm + more_loss["rmse_e"] = self.display_if_exist( + rmse_e.detach(), find_energy + ) + # more_loss['log_keys'].append('rmse_e') + else: # use l1 and for all atoms + l1_ener_loss = F.l1_loss( + energy_pred.reshape([-1]), + energy_label.reshape([-1]), + reduction="sum", + ) + loss += pref_e * l1_ener_loss + more_loss["mae_e"] = self.display_if_exist( + F.l1_loss( + energy_pred.reshape([-1]), + energy_label.reshape([-1]), + reduction="mean", + ).detach(), + find_energy, + ) + # more_loss['log_keys'].append('rmse_e') + if mae: + mae_e = paddle.mean(paddle.abs(energy_pred - energy_label)) * atom_norm + more_loss["mae_e"] = self.display_if_exist(mae_e.detach(), find_energy) + mae_e_all = paddle.mean(paddle.abs(energy_pred - energy_label)) + more_loss["mae_e_all"] = self.display_if_exist( + mae_e_all.detach(), find_energy + ) + + if ( + (self.has_f or self.has_pf or self.relative_f or self.has_gf) + and "force" in model_pred + and "force" in label + ): + find_force = label.get("find_force", 0.0) + pref_f = pref_f * find_force + force_pred = model_pred["force"] + force_label = label["force"] + diff_f = (force_label - force_pred).reshape([-1]) + + if self.relative_f is not None: + force_label_3 = force_label.reshape([-1, 3]) + # norm_f = 
force_label_3.norm(axis=1, keepdim=True) + self.relative_f + norm_f = ( + decomp.norm(force_label_3, axis=1, keepdim=True) + self.relative_f + ) + diff_f_3 = diff_f.reshape([-1, 3]) + diff_f_3 = diff_f_3 / norm_f + diff_f = diff_f_3.reshape([-1]) + + if self.has_f: + if not self.use_l1_all: + l2_force_loss = paddle.mean(paddle.square(diff_f)) + if not self.inference: + more_loss["l2_force_loss"] = self.display_if_exist( + l2_force_loss.detach(), find_force + ) + loss += (pref_f * l2_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_f = l2_force_loss.sqrt() + more_loss["rmse_f"] = self.display_if_exist( + rmse_f.detach(), find_force + ) + else: + l1_force_loss = F.l1_loss(force_label, force_pred, reduction="none") + more_loss["mae_f"] = self.display_if_exist( + l1_force_loss.mean().detach(), find_force + ) + l1_force_loss = l1_force_loss.sum(-1).mean(-1).sum() + loss += (pref_f * l1_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + if mae: + mae_f = paddle.mean(paddle.abs(diff_f)) + more_loss["mae_f"] = self.display_if_exist( + mae_f.detach(), find_force + ) + + if self.has_pf and "atom_pref" in label: + atom_pref = label["atom_pref"] + find_atom_pref = label.get("find_atom_pref", 0.0) + pref_pf = pref_pf * find_atom_pref + atom_pref_reshape = atom_pref.reshape([-1]) + l2_pref_force_loss = (paddle.square(diff_f) * atom_pref_reshape).mean() + if not self.inference: + more_loss["l2_pref_force_loss"] = self.display_if_exist( + l2_pref_force_loss.detach(), find_atom_pref + ) + loss += (pref_pf * l2_pref_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_pf = l2_pref_force_loss.sqrt() + more_loss["rmse_pf"] = self.display_if_exist( + rmse_pf.detach(), find_atom_pref + ) + + if self.has_gf and "drdq" in label: + drdq = label["drdq"] + find_drdq = label.get("find_drdq", 0.0) + pref_gf = pref_gf * find_drdq + force_reshape_nframes = force_pred.reshape([-1, natoms * 3]) + force_label_reshape_nframes = force_label.reshape([-1, natoms * 3]) + drdq_reshape = drdq.reshape( + [-1, natoms 
* 3, self.numb_generalized_coord] + ) + + # gen_force_label = paddle.einsum( + # "bij,bi->bj", drdq_reshape, force_label_reshape_nframes + # ) + gen_force_label = ( + drdq_reshape * force_label_reshape_nframes.unsqueeze(-1) + ).sum([-2]) + + # gen_force = paddle.einsum( + # "bij,bi->bj", drdq_reshape, force_reshape_nframes + # ) + gen_force = (drdq_reshape * force_reshape_nframes.unsqueeze(-1)).sum( + [-2] + ) + + diff_gen_force = gen_force_label - gen_force + l2_gen_force_loss = paddle.square(diff_gen_force).mean() + if not self.inference: + more_loss["l2_gen_force_loss"] = self.display_if_exist( + l2_gen_force_loss.detach(), find_drdq + ) + loss += (pref_gf * l2_gen_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_gf = l2_gen_force_loss.sqrt() + more_loss["rmse_gf"] = self.display_if_exist( + rmse_gf.detach(), find_drdq + ) + + if self.has_v and "virial" in model_pred and "virial" in label: + find_virial = label.get("find_virial", 0.0) + pref_v = pref_v * find_virial + diff_v = label["virial"] - model_pred["virial"].reshape([-1, 9]) + l2_virial_loss = paddle.mean(paddle.square(diff_v)) + if not self.inference: + more_loss["l2_virial_loss"] = self.display_if_exist( + l2_virial_loss.detach(), find_virial + ) + loss += atom_norm * (pref_v * l2_virial_loss) + rmse_v = l2_virial_loss.sqrt() * atom_norm + more_loss["rmse_v"] = self.display_if_exist(rmse_v.detach(), find_virial) + if mae: + mae_v = paddle.mean(paddle.abs(diff_v)) * atom_norm + more_loss["mae_v"] = self.display_if_exist(mae_v.detach(), find_virial) + + if self.has_ae and "atom_energy" in model_pred and "atom_ener" in label: + atom_ener = model_pred["atom_energy"] + atom_ener_label = label["atom_ener"] + find_atom_ener = label.get("find_atom_ener", 0.0) + pref_ae = pref_ae * find_atom_ener + atom_ener_reshape = atom_ener.reshape([-1]) + atom_ener_label_reshape = atom_ener_label.reshape([-1]) + l2_atom_ener_loss = paddle.square( + atom_ener_label_reshape - atom_ener_reshape + ).mean() + if not 
self.inference: + more_loss["l2_atom_ener_loss"] = self.display_if_exist( + l2_atom_ener_loss.detach(), find_atom_ener + ) + loss += (pref_ae * l2_atom_ener_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_ae = l2_atom_ener_loss.sqrt() + more_loss["rmse_ae"] = self.display_if_exist( + rmse_ae.detach(), find_atom_ener + ) + + if not self.inference: + more_loss["rmse"] = paddle.sqrt(loss.detach()) + return model_pred, loss, more_loss + + @property + def label_requirement(self) -> list[DataRequirementItem]: + """Return data label requirements needed for this loss calculation.""" + label_requirement = [] + if self.has_e: + label_requirement.append( + DataRequirementItem( + "energy", + ndof=1, + atomic=False, + must=False, + high_prec=True, + ) + ) + if self.has_f: + label_requirement.append( + DataRequirementItem( + "force", + ndof=3, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.has_v: + label_requirement.append( + DataRequirementItem( + "virial", + ndof=9, + atomic=False, + must=False, + high_prec=False, + ) + ) + if self.has_ae: + label_requirement.append( + DataRequirementItem( + "atom_ener", + ndof=1, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.has_pf: + label_requirement.append( + DataRequirementItem( + "atom_pref", + ndof=1, + atomic=True, + must=False, + high_prec=False, + repeat=3, + ) + ) + if self.has_gf > 0: + label_requirement.append( + DataRequirementItem( + "drdq", + ndof=self.numb_generalized_coord * 3, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.enable_atom_ener_coeff: + label_requirement.append( + DataRequirementItem( + "atom_ener_coeff", + ndof=1, + atomic=True, + must=False, + high_prec=False, + default=1.0, + ) + ) + return label_requirement diff --git a/deepmd/pd/loss/loss.py b/deepmd/pd/loss/loss.py new file mode 100644 index 0000000000..c083996720 --- /dev/null +++ b/deepmd/pd/loss/loss.py @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from abc import ( + ABC, + 
abstractmethod, +) + +import paddle + +from deepmd.utils.data import ( + DataRequirementItem, +) + + +class TaskLoss(paddle.nn.Layer, ABC): + def __init__(self, **kwargs): + """Construct loss.""" + super().__init__() + + def forward(self, input_dict, model, label, natoms, learning_rate): + """Return loss .""" + raise NotImplementedError + + @property + @abstractmethod + def label_requirement(self) -> list[DataRequirementItem]: + """Return data label requirements needed for this loss calculation.""" + pass + + @staticmethod + def display_if_exist(loss: paddle.Tensor, find_property: float) -> paddle.Tensor: + """Display NaN if labeled property is not found. + + Parameters + ---------- + loss : paddle.Tensor + the loss tensor + find_property : float + whether the property is found + """ + return loss if bool(find_property) else paddle.to_tensor(float("nan")) diff --git a/deepmd/pd/model/__init__.py b/deepmd/pd/model/__init__.py new file mode 100644 index 0000000000..171d147114 --- /dev/null +++ b/deepmd/pd/model/__init__.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.utils.entry_point import ( + load_entry_point, +) + +load_entry_point("deepmd.pd") diff --git a/deepmd/pd/model/atomic_model/__init__.py b/deepmd/pd/model/atomic_model/__init__.py new file mode 100644 index 0000000000..68a7cc8f79 --- /dev/null +++ b/deepmd/pd/model/atomic_model/__init__.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +"""The atomic model provides the prediction of some property on each +atom. All the atomic models are not supposed to be directly accessed +by users, but it provides a convenient interface for the +implementation of models. + +Taking the energy models for example, the developeres only needs to +implement the atomic energy prediction via an atomic model, and the +model can be automatically made by the `deepmd.dpmodel.make_model` +method. 
The `DPModel` is made by +``` +DPModel = make_model(DPAtomicModel) +``` + +""" + +from .base_atomic_model import ( + BaseAtomicModel, +) +from .dp_atomic_model import ( + DPAtomicModel, +) +from .energy_atomic_model import ( + DPEnergyAtomicModel, +) + +__all__ = [ + "BaseAtomicModel", + "DPAtomicModel", + "DPEnergyAtomicModel", +] diff --git a/deepmd/pd/model/atomic_model/base_atomic_model.py b/deepmd/pd/model/atomic_model/base_atomic_model.py new file mode 100644 index 0000000000..44553482c6 --- /dev/null +++ b/deepmd/pd/model/atomic_model/base_atomic_model.py @@ -0,0 +1,578 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import copy +import logging +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.atomic_model import ( + make_base_atomic_model, +) +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, +) +from deepmd.pd.utils import ( + AtomExcludeMask, + PairExcludeMask, + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.pd.utils.stat import ( + compute_output_stats, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) + +log = logging.getLogger(__name__) +dtype = env.GLOBAL_PD_FLOAT_PRECISION +device = env.DEVICE + +BaseAtomicModel_ = make_base_atomic_model(paddle.Tensor) + + +class BaseAtomicModel(paddle.nn.Layer, BaseAtomicModel_): + """The base of atomic model. + + Parameters + ---------- + type_map + Mapping atom type to the name (str) of the type. + For example `type_map[1]` gives the name of the type 1. + atom_exclude_types + Exclude the atomic contribution of the given types + pair_exclude_types + Exclude the pair of atoms of the given types from computing the output + of the atomic model. 
Implemented by removing the pairs from the nlist. + rcond : float, optional + The condition number for the regression of atomic energy. + preset_out_bias : Dict[str, list[Optional[paddle.Tensor]]], optional + Specifying atomic energy contribution in vacuum. Given by key:value pairs. + The value is a list specifying the bias. the elements can be None or np.array of output shape. + For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] + The `set_davg_zero` key in the descrptor should be set. + + """ + + def __init__( + self, + type_map: list[str], + atom_exclude_types: list[int] = [], + pair_exclude_types: list[tuple[int, int]] = [], + rcond: Optional[float] = None, + preset_out_bias: Optional[dict[str, paddle.Tensor]] = None, + ): + paddle.nn.Layer.__init__(self) + BaseAtomicModel_.__init__(self) + self.type_map = type_map + self.reinit_atom_exclude(atom_exclude_types) + self.reinit_pair_exclude(pair_exclude_types) + self.rcond = rcond + self.preset_out_bias = preset_out_bias + + def init_out_stat(self): + """Initialize the output bias.""" + ntypes = self.get_ntypes() + self.bias_keys: list[str] = list(self.fitting_output_def().keys()) + self.max_out_size = max( + [self.atomic_output_def()[kk].size for kk in self.bias_keys] + ) + self.n_out = len(self.bias_keys) + out_bias_data = self._default_bias() + out_std_data = self._default_std() + self.register_buffer("out_bias", out_bias_data) + self.register_buffer("out_std", out_std_data) + + def set_out_bias(self, out_bias: paddle.Tensor) -> None: + self.out_bias = out_bias + + def __setitem__(self, key, value): + if key in ["out_bias"]: + self.out_bias = value + elif key in ["out_std"]: + self.out_std = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["out_bias"]: + return self.out_bias + elif key in ["out_std"]: + return self.out_std + else: + raise KeyError(key) + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.type_map + + def 
reinit_atom_exclude( + self, + exclude_types: list[int] = [], + ): + self.atom_exclude_types = exclude_types + if exclude_types == []: + self.atom_excl = None + else: + self.atom_excl = AtomExcludeMask(self.get_ntypes(), self.atom_exclude_types) + + def reinit_pair_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ): + self.pair_exclude_types = exclude_types + if exclude_types == []: + self.pair_excl = None + else: + self.pair_excl = PairExcludeMask(self.get_ntypes(), self.pair_exclude_types) + + # to make jit happy... + def make_atom_mask( + self, + atype: paddle.Tensor, + ) -> paddle.Tensor: + """The atoms with type < 0 are treated as virutal atoms, + which serves as place-holders for multi-frame calculations + with different number of atoms in different frames. + + Parameters + ---------- + atype + Atom types. >= 0 for real atoms <0 for virtual atoms. + + Returns + ------- + mask + True for real atoms and False for virutal atoms. + + """ + # supposed to be supported by all backends + return atype >= 0 + + def atomic_output_def(self) -> FittingOutputDef: + old_def = self.fitting_output_def() + old_list = list(old_def.get_data().values()) + return FittingOutputDef( + old_list # noqa:RUF005 + + [ + OutputVariableDef( + name="mask", + shape=[1], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ) + ] + ) + + def forward_common_atomic( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ) -> dict[str, paddle.Tensor]: + """Common interface for atomic inference. + + This method accept extended coordinates, extended atom typs, neighbor list, + and predict the atomic contribution of the fit property. 
+ + Parameters + ---------- + extended_coord + extended coodinates, shape: nf x (nall x 3) + extended_atype + extended atom typs, shape: nf x nall + for a type < 0 indicating the atomic is virtual. + nlist + neighbor list, shape: nf x nloc x nsel + mapping + extended to local index mapping, shape: nf x nall + fparam + frame parameters, shape: nf x dim_fparam + aparam + atomic parameter, shape: nf x nloc x dim_aparam + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + ret_dict + dict of output atomic properties. + should implement the definition of `fitting_output_def`. + ret_dict["mask"] of shape nf x nloc will be provided. + ret_dict["mask"][ff,ii] == 1 indicating the ii-th atom of the ff-th frame is real. + ret_dict["mask"][ff,ii] == 0 indicating the ii-th atom of the ff-th frame is virtual. + + """ + _, nloc, _ = nlist.shape + atype = extended_atype[:, :nloc] + + if self.pair_excl is not None: + pair_mask = self.pair_excl(nlist, extended_atype) + # exclude neighbors in the nlist + nlist = paddle.where(pair_mask == 1, nlist, -1) + + ext_atom_mask = self.make_atom_mask(extended_atype) + ret_dict = self.forward_atomic( + extended_coord, + paddle.where( + ext_atom_mask, extended_atype, paddle.zeros_like(extended_atype) + ), + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + comm_dict=comm_dict, + ) + ret_dict = self.apply_out_stat(ret_dict, atype) + + # nf x nloc + atom_mask = ext_atom_mask[:, :nloc].astype(paddle.int32) + if self.atom_excl is not None: + atom_mask *= self.atom_excl(atype) + + for kk in ret_dict.keys(): + out_shape = ret_dict[kk].shape + out_shape2 = 1 + for ss in out_shape[2:]: + out_shape2 *= ss + ret_dict[kk] = ( + ret_dict[kk].reshape([out_shape[0], out_shape[1], out_shape2]) + * atom_mask.unsqueeze(2).astype(ret_dict[kk].dtype) + ).reshape(out_shape) + ret_dict["mask"] = atom_mask + + return ret_dict + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: 
paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ) -> dict[str, paddle.Tensor]: + return self.forward_common_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + comm_dict=comm_dict, + ) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.reinit_atom_exclude( + map_atom_exclude_types(self.atom_exclude_types, remap_index) + ) + self.reinit_pair_exclude( + map_pair_exclude_types(self.pair_exclude_types, remap_index) + ) + if has_new_type: + extend_shape = [ + self.out_bias.shape[0], + len(type_map), + *list(self.out_bias.shape[2:]), + ] + extend_bias = paddle.zeros(extend_shape, dtype=self.out_bias.dtype).to( + device=self.out_bias.place + ) + self.out_bias = paddle.concat([self.out_bias, extend_bias], axis=1) + extend_std = paddle.ones(extend_shape, dtype=self.out_std.dtype).to( + device=self.out_std.place + ) + self.out_std = paddle.concat([self.out_std, extend_std], axis=1) + self.out_bias = self.out_bias[:, remap_index, :] + self.out_std = self.out_std[:, remap_index, :] + + def serialize(self) -> dict: + return { + "type_map": self.type_map, + "atom_exclude_types": self.atom_exclude_types, + "pair_exclude_types": self.pair_exclude_types, + "rcond": self.rcond, + "preset_out_bias": self.preset_out_bias, + "@variables": { + "out_bias": to_numpy_array(self.out_bias), + "out_std": to_numpy_array(self.out_std), + }, + } + + @classmethod + def 
deserialize(cls, data: dict) -> "BaseAtomicModel": + data = copy.deepcopy(data) + variables = data.pop("@variables", None) + variables = ( + {"out_bias": None, "out_std": None} if variables is None else variables + ) + obj = cls(**data) + obj["out_bias"] = ( + to_paddle_tensor(variables["out_bias"]) + if variables["out_bias"] is not None + else obj._default_bias() + ) + obj["out_std"] = ( + to_paddle_tensor(variables["out_std"]) + if variables["out_std"] is not None + else obj._default_std() + ) + return obj + + def compute_or_load_stat( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute the output statistics (e.g. energy bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + stat_file_path : Optional[DPPath] + The path to the stat file. + + """ + raise NotImplementedError + + def compute_or_load_out_stat( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute the output statistics (e.g. energy bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. 
Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + stat_file_path : Optional[DPPath] + The path to the stat file. + + """ + self.change_out_bias( + merged, + stat_file_path=stat_file_path, + bias_adjust_mode="set-by-statistic", + ) + + def apply_out_stat( + self, + ret: dict[str, paddle.Tensor], + atype: paddle.Tensor, + ): + """Apply the stat to each atomic output. + The developer may override the method to define how the bias is applied + to the atomic output of the model. + + Parameters + ---------- + ret + The returned dict by the forward_atomic method + atype + The atom types. nf x nloc + + """ + out_bias, out_std = self._fetch_out_stat(self.bias_keys) + for kk in self.bias_keys: + # nf x nloc x odims, out_bias: ntypes x odims + ret[kk] = ret[kk] + out_bias[kk][atype] + return ret + + def change_out_bias( + self, + sample_merged, + stat_file_path: Optional[DPPath] = None, + bias_adjust_mode="change-by-statistic", + ) -> None: + """Change the output bias according to the input data and the pretrained model. + + Parameters + ---------- + sample_merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + bias_adjust_mode : str + The mode for changing output bias : ['change-by-statistic', 'set-by-statistic'] + 'change-by-statistic' : perform predictions on labels of target dataset, + and do least square on the errors to obtain the target shift as bias. + 'set-by-statistic' : directly use the statistic output bias in the target dataset. + stat_file_path : Optional[DPPath] + The path to the stat file. 
+ """ + if bias_adjust_mode == "change-by-statistic": + delta_bias, out_std = compute_output_stats( + sample_merged, + self.get_ntypes(), + keys=list(self.atomic_output_def().keys()), + stat_file_path=stat_file_path, + model_forward=self._get_forward_wrapper_func(), + rcond=self.rcond, + preset_bias=self.preset_out_bias, + atomic_output=self.atomic_output_def(), + ) + self._store_out_stat(delta_bias, out_std, add=True) + elif bias_adjust_mode == "set-by-statistic": + bias_out, std_out = compute_output_stats( + sample_merged, + self.get_ntypes(), + keys=list(self.atomic_output_def().keys()), + stat_file_path=stat_file_path, + rcond=self.rcond, + preset_bias=self.preset_out_bias, + atomic_output=self.atomic_output_def(), + ) + self._store_out_stat(bias_out, std_out) + else: + raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode) + + def _get_forward_wrapper_func(self) -> Callable[..., paddle.Tensor]: + """Get a forward wrapper of the atomic model for output bias calculation.""" + + def model_forward(coord, atype, box, fparam=None, aparam=None): + with ( + paddle.no_grad() + ): # it's essential for pure paddle forward function to use auto_batchsize + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + self.get_rcut(), + self.get_sel(), + mixed_types=self.mixed_types(), + box=box, + ) + atomic_ret = self.forward_common_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + ) + return {kk: vv.detach() for kk, vv in atomic_ret.items()} + + return model_forward + + def _default_bias(self): + ntypes = self.get_ntypes() + return paddle.zeros([self.n_out, ntypes, self.max_out_size], dtype=dtype).to( + device=device + ) + + def _default_std(self): + ntypes = self.get_ntypes() + return paddle.ones([self.n_out, ntypes, self.max_out_size], dtype=dtype).to( + device=device + ) + + def _varsize( + self, + shape: list[int], + ) -> int: + 
output_size = 1 + len_shape = len(shape) + for i in range(len_shape): + output_size *= shape[i] + return output_size + + def _get_bias_index( + self, + kk: str, + ) -> int: + res: list[int] = [] + for i, e in enumerate(self.bias_keys): + if e == kk: + res.append(i) + assert len(res) == 1 + return res[0] + + def _store_out_stat( + self, + out_bias: dict[str, paddle.Tensor], + out_std: dict[str, paddle.Tensor], + add: bool = False, + ): + ntypes = self.get_ntypes() + out_bias_data = paddle.clone(self.out_bias) + out_std_data = paddle.clone(self.out_std) + for kk in out_bias.keys(): + assert kk in out_std.keys() + idx = self._get_bias_index(kk) + size = self._varsize(self.atomic_output_def()[kk].shape) + if not add: + out_bias_data[idx, :, :size] = out_bias[kk].reshape([ntypes, size]) + else: + out_bias_data[idx, :, :size] += out_bias[kk].reshape([ntypes, size]) + out_std_data[idx, :, :size] = out_std[kk].reshape([ntypes, size]) + paddle.assign(out_bias_data, self.out_bias) + paddle.assign(out_std_data, self.out_std) + + def _fetch_out_stat( + self, + keys: list[str], + ) -> tuple[dict[str, paddle.Tensor], dict[str, paddle.Tensor]]: + ret_bias = {} + ret_std = {} + ntypes = self.get_ntypes() + for kk in keys: + idx = self._get_bias_index(kk) + isize = self._varsize(self.atomic_output_def()[kk].shape) + ret_bias[kk] = self.out_bias[idx, :, :isize].reshape( + [ntypes] + list(self.atomic_output_def()[kk].shape) # noqa: RUF005 + ) + ret_std[kk] = self.out_std[idx, :, :isize].reshape( + [ntypes] + list(self.atomic_output_def()[kk].shape) # noqa: RUF005 + ) + return ret_bias, ret_std diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py new file mode 100644 index 0000000000..45eb9ca1cb --- /dev/null +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -0,0 +1,333 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import functools +import logging +from typing import ( + Optional, +) + +import paddle + 
+from deepmd.dpmodel import ( + FittingOutputDef, +) +from deepmd.pd.model.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_atomic_model import ( + BaseAtomicModel, +) + +log = logging.getLogger(__name__) + + +@BaseAtomicModel.register("standard") +class DPAtomicModel(BaseAtomicModel): + """Model give atomic prediction of some physical property. + + Parameters + ---------- + descriptor + Descriptor + fitting_net + Fitting net + type_map + Mapping atom type to the name (str) of the type. + For example `type_map[1]` gives the name of the type 1. + """ + + eval_descriptor_list: list[paddle.Tensor] + + def __init__( + self, + descriptor, + fitting, + type_map: list[str], + **kwargs, + ): + super().__init__(type_map, **kwargs) + ntypes = len(type_map) + self.type_map = type_map + self.ntypes = ntypes + self.descriptor = descriptor + self.rcut = self.descriptor.get_rcut() + self.sel = self.descriptor.get_sel() + self.fitting_net = fitting + super().init_out_stat() + self.enable_eval_descriptor_hook = False + self.eval_descriptor_list = [] + + # register 'type_map' as buffer + def _string_to_array(s: str) -> list[int]: + return [ord(c) for c in s] + + self.register_buffer( + "buffer_type_map", + paddle.to_tensor(_string_to_array(" ".join(self.type_map)), dtype="int32"), + ) + self.buffer_type_map.name = "buffer_type_map" + if hasattr(self.descriptor, "has_message_passing"): + # register 'has_message_passing' as buffer(cast to int32 as problems may meets with vector) + self.register_buffer( + "buffer_has_message_passing", + paddle.to_tensor(self.descriptor.has_message_passing(), dtype="int32"), + ) + self.buffer_has_message_passing.name = "buffer_has_message_passing" + # register 'ntypes' as buffer + self.register_buffer( + "buffer_ntypes", paddle.to_tensor(self.ntypes, 
dtype="int32") + ) + self.buffer_ntypes.name = "buffer_ntypes" + # register 'rcut' as buffer + self.register_buffer( + "buffer_rcut", paddle.to_tensor(self.rcut, dtype="float64") + ) + self.buffer_rcut.name = "buffer_rcut" + if hasattr(self.fitting_net, "get_dim_fparam"): + # register 'dfparam' as buffer + self.register_buffer( + "buffer_dfparam", + paddle.to_tensor(self.fitting_net.get_dim_fparam(), dtype="int32"), + ) + self.buffer_dfparam.name = "buffer_dfparam" + if hasattr(self.fitting_net, "get_dim_aparam"): + # register 'daparam' as buffer + self.register_buffer( + "buffer_daparam", + paddle.to_tensor(self.fitting_net.get_dim_aparam(), dtype="int32"), + ) + self.buffer_daparam.name = "buffer_daparam" + # register 'aparam_nall' as buffer + self.register_buffer( + "buffer_aparam_nall", + paddle.to_tensor(False, dtype="int32"), + ) + self.buffer_aparam_nall.name = "buffer_aparam_nall" + + def set_eval_descriptor_hook(self, enable: bool) -> None: + """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" + self.enable_eval_descriptor_hook = enable + self.eval_descriptor_list = [] + + def eval_descriptor(self) -> paddle.Tensor: + """Evaluate the descriptor.""" + return paddle.concat(self.eval_descriptor_list) + + def fitting_output_def(self) -> FittingOutputDef: + """Get the output def of the fitting net.""" + return ( + self.fitting_net.output_def() + if self.fitting_net is not None + else self.coord_denoise_net.output_def() + ) + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.rcut + + def get_sel(self) -> list[int]: + """Get the neighbor selection.""" + return self.sel + + def mixed_types(self) -> bool: + """If true, the model + 1. assumes total number of atoms aligned across frames; + 2. uses a neighbor list that does not distinguish different atomic types. + + If false, the model + 1. assumes total number of atoms of each atom type aligned across frames; + 2. 
uses a neighbor list that distinguishes different atomic types. + + """ + return self.descriptor.mixed_types() + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + super().change_type_map( + type_map=type_map, model_with_new_type_stat=model_with_new_type_stat + ) + self.type_map = type_map + self.ntypes = len(type_map) + self.descriptor.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.descriptor + if model_with_new_type_stat is not None + else None, + ) + self.fitting_net.change_type_map(type_map=type_map) + + def has_message_passing(self) -> bool: + """Returns whether the atomic model has message passing.""" + return self.descriptor.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the atomic model needs sorted nlist when using `forward_lower`.""" + return self.descriptor.need_sorted_nlist_for_lower() + + def serialize(self) -> dict: + dd = BaseAtomicModel.serialize(self) + dd.update( + { + "@class": "Model", + "@version": 2, + "type": "standard", + "type_map": self.type_map, + "descriptor": self.descriptor.serialize(), + "fitting": self.fitting_net.serialize(), + } + ) + return dd + + @classmethod + def deserialize(cls, data) -> "DPAtomicModel": + data = copy.deepcopy(data) + check_version_compatibility(data.pop("@version", 1), 2, 1) + data.pop("@class", None) + data.pop("type", None) + descriptor_obj = BaseDescriptor.deserialize(data.pop("descriptor")) + fitting_obj = BaseFitting.deserialize(data.pop("fitting")) + data["descriptor"] = descriptor_obj + data["fitting"] = fitting_obj + obj = super().deserialize(data) + return obj + + def forward_atomic( + self, + extended_coord, + extended_atype, 
+ nlist, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ) -> dict[str, paddle.Tensor]: + """Return atomic prediction. + + Parameters + ---------- + extended_coord + coodinates in extended region + extended_atype + atomic type in extended region + nlist + neighbor list. nf x nloc x nsel + mapping + mapps the extended indices to local indices + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + + Returns + ------- + result_dict + the result dict, defined by the `FittingOutputDef`. + + """ + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + if self.do_grad_r() or self.do_grad_c(): + extended_coord.stop_gradient = False + descriptor, rot_mat, g2, h2, sw = self.descriptor( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + comm_dict=comm_dict, + ) + assert descriptor is not None + if self.enable_eval_descriptor_hook: + self.eval_descriptor_list.append(descriptor) + # energy, force + fit_ret = self.fitting_net( + descriptor, + atype, + gr=rot_mat, + g2=g2, + h2=h2, + fparam=fparam, + aparam=aparam, + ) + return fit_ret + + def get_out_bias(self) -> paddle.Tensor: + return self.out_bias + + def compute_or_load_stat( + self, + sampled_func, + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute or load the statistics parameters of the model, + such as mean and standard deviation of descriptors or the energy bias of the fitting net. + When `sampled` is provided, all the statistics parameters will be calculated (or re-calculated for update), + and saved in the `stat_file_path`(s). + When `sampled` is not provided, it will check the existence of `stat_file_path`(s) + and load the calculated statistics parameters. + + Parameters + ---------- + sampled_func + The lazy sampled function to get data frames from different data systems. 
+ stat_file_path + The dictionary of paths to the statistics files. + """ + if stat_file_path is not None and self.type_map is not None: + # descriptors and fitting net with different type_map + # should not share the same parameters + stat_file_path /= " ".join(self.type_map) + + @functools.lru_cache + def wrapped_sampler(): + sampled = sampled_func() + if self.pair_excl is not None: + pair_exclude_types = self.pair_excl.get_exclude_types() + for sample in sampled: + sample["pair_exclude_types"] = list(pair_exclude_types) + if self.atom_excl is not None: + atom_exclude_types = self.atom_excl.get_exclude_types() + for sample in sampled: + sample["atom_exclude_types"] = list(atom_exclude_types) + return sampled + + self.descriptor.compute_input_stats(wrapped_sampler, stat_file_path) + self.compute_or_load_out_stat(wrapped_sampler, stat_file_path) + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.fitting_net.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.fitting_net.get_dim_aparam() + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.fitting_net.get_sel_type() + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). 
+ """ + return False diff --git a/deepmd/pd/model/atomic_model/energy_atomic_model.py b/deepmd/pd/model/atomic_model/energy_atomic_model.py new file mode 100644 index 0000000000..2d0ef4db4c --- /dev/null +++ b/deepmd/pd/model/atomic_model/energy_atomic_model.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.pd.model.task.ener import ( + EnergyFittingNet, + EnergyFittingNetDirect, + InvarFitting, +) + +from .dp_atomic_model import ( + DPAtomicModel, +) + + +class DPEnergyAtomicModel(DPAtomicModel): + def __init__(self, descriptor, fitting, type_map, **kwargs): + assert ( + isinstance(fitting, EnergyFittingNet) + or isinstance(fitting, EnergyFittingNetDirect) + or isinstance(fitting, InvarFitting) + ) + super().__init__(descriptor, fitting, type_map, **kwargs) diff --git a/deepmd/pd/model/backbone/__init__.py b/deepmd/pd/model/backbone/__init__.py new file mode 100644 index 0000000000..f7948285cf --- /dev/null +++ b/deepmd/pd/model/backbone/__init__.py @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .backbone import ( + BackBone, +) + +__all__ = [ + "BackBone", +] diff --git a/deepmd/pd/model/backbone/backbone.py b/deepmd/pd/model/backbone/backbone.py new file mode 100644 index 0000000000..f37346a44f --- /dev/null +++ b/deepmd/pd/model/backbone/backbone.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import paddle + + +class BackBone(paddle.nn.Layer): + def __init__(self, **kwargs): + """BackBone base method.""" + super().__init__() + + def forward(self, **kwargs): + """Calculate backBone.""" + raise NotImplementedError diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py new file mode 100644 index 0000000000..0141e4cd03 --- /dev/null +++ b/deepmd/pd/model/descriptor/__init__.py @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + DescriptorBlock, + 
class DescriptorBlock(paddle.nn.Layer, ABC, make_plugin_registry("DescriptorBlock")):
    """Base building block of a descriptor.

    Given the atomic coordinates, atomic types and a neighbor list, a
    DescriptorBlock computes (part of) the per-atom descriptor.  Concrete
    blocks register themselves in the plugin registry and are selected at
    construction time through the ``type`` keyword argument.
    """

    # whether the block operates on a local cluster of atoms only
    local_cluster = False

    def __new__(cls, *args, **kwargs):
        # Constructing the abstract base dispatches to the registered
        # subclass named by the `type` keyword argument.
        if cls is DescriptorBlock:
            try:
                requested_type = kwargs["type"]
            except KeyError as e:
                raise KeyError(
                    "the type of DescriptorBlock should be set by `type`"
                ) from e
            cls = cls.get_class_by_type(requested_type)
        return super().__new__(cls)

    @abstractmethod
    def get_rcut(self) -> float:
        """Returns the cut-off radius."""
        pass

    @abstractmethod
    def get_rcut_smth(self) -> float:
        """Returns the radius where the neighbor information starts to smoothly decay to 0."""
        pass

    @abstractmethod
    def get_nsel(self) -> int:
        """Returns the number of selected atoms in the cut-off radius."""
        pass

    @abstractmethod
    def get_sel(self) -> list[int]:
        """Returns the number of selected atoms for each type."""
        pass

    @abstractmethod
    def get_ntypes(self) -> int:
        """Returns the number of element types."""
        pass

    @abstractmethod
    def get_dim_out(self) -> int:
        """Returns the output dimension."""
        pass

    @abstractmethod
    def get_dim_in(self) -> int:
        """Returns the input dimension."""
        pass

    @abstractmethod
    def get_dim_emb(self) -> int:
        """Returns the embedding dimension."""
        pass

    @abstractmethod
    def get_env_protection(self) -> float:
        """Returns the protection of building environment matrix."""
        pass

    def compute_input_stats(
        self,
        merged: Union[Callable[[], list[dict]], list[dict]],
        path: Optional[DPPath] = None,
    ):
        """
        Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.

        Parameters
        ----------
        merged : Union[Callable[[], list[dict]], list[dict]]
            - list[dict]: A list of data samples from various data systems.
                Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor`
                originating from the `i`-th data system.
            - Callable[[], list[dict]]: A lazy function that returns data samples in the above format
                only when needed. Since the sampling process can be slow and memory-intensive,
                the lazy function helps by only sampling once.
        path : Optional[DPPath]
            The path to the stat file.
        """
        raise NotImplementedError

    def get_stats(self) -> dict[str, StatItem]:
        """Get the statistics of the descriptor."""
        raise NotImplementedError

    def share_params(self, base_class, shared_level, resume=False):
        """Share the parameters of self to ``base_class`` during multitask training.

        If training does not start from a checkpoint (``resume`` is False),
        the separated statistics (e.g. mean and stddev) are merged and
        re-computed across the sharing classes first.
        """
        assert (
            self.__class__ == base_class.__class__
        ), "Only descriptors of the same type can share params!"
        if shared_level != 0:
            raise NotImplementedError
        # shared_level == 0: link every parameter and buffer.
        if hasattr(self, "mean"):
            if not resume:
                # in case statistics changed since the checkpoint: merge the
                # stats of both classes and refresh the linked buffers
                merged_stat = EnvMatStatSe(base_class)
                merged_stat.stats = base_class.stats
                for stat_key in base_class.get_stats():
                    merged_stat.stats[stat_key] += self.get_stats()[stat_key]
                mean, stddev = merged_stat()
                if not base_class.set_davg_zero:
                    paddle.assign(
                        paddle.to_tensor(mean).to(device=env.DEVICE),
                        base_class.mean,
                    )  # pylint: disable=no-explicit-dtype
                    paddle.assign(
                        paddle.to_tensor(stddev).to(device=env.DEVICE),
                        base_class.stddev,
                    )  # pylint: disable=no-explicit-dtype
            # buffers must be shared even when no statistics are computed
            self.mean = base_class.mean
            self.stddev = base_class.stddev
        # `set_state_dict` would only copy values (it merely inits the model),
        # so link the sub-layers directly; this shares all params except the
        # buffers handled above.
        for layer_name in self._sub_layers:
            self._sub_layers[layer_name] = base_class._sub_layers[layer_name]

    @abstractmethod
    def forward(
        self,
        nlist: paddle.Tensor,
        extended_coord: paddle.Tensor,
        extended_atype: paddle.Tensor,
        extended_atype_embd: Optional[paddle.Tensor] = None,
        mapping: Optional[paddle.Tensor] = None,
    ):
        """Calculate DescriptorBlock."""
        pass

    @abstractmethod
    def has_message_passing(self) -> bool:
        """Returns whether the descriptor block has message passing."""

    @abstractmethod
    def need_sorted_nlist_for_lower(self) -> bool:
        """Returns whether the descriptor block needs sorted nlist when using `forward_lower`."""


def make_default_type_embedding(
    ntypes,
):
    """Build a default type-embedding net with the default embedding width."""
    config = {"tebd_dim": 8}
    return TypeEmbedNet(ntypes, config["tebd_dim"]), config
def _make_env_mat(
    nlist,
    coord,
    rcut: float,
    rcut_smth: float,
    radial_only: bool = False,
    protection: float = 0.0,
):
    """Make the smooth environment matrix.

    Parameters
    ----------
    nlist
        Neighbor list, shape [bsz, natoms, nnei]; entries < 0 mark padding.
    coord
        Extended coordinates, reshaped internally to [bsz, nall, 3].
    rcut : float
        Cut-off radius.
    rcut_smth : float
        Radius where the smooth switching function starts to decay.
        (Fixed: this parameter was previously misspelled ``ruct_smth``;
        it is private and always called positionally, so the rename is safe.)
    radial_only : bool
        If True, only the radial (1/r) part of the matrix is returned.
    protection : float
        Added to distances to protect against division by zero.

    Returns
    -------
    env_mat
        The (weighted) environment matrix.
    diff
        Masked coordinate differences, shape [bsz, natoms, nnei, 3].
    weight
        The smooth switch weights.
    """
    bsz, natoms, nnei = nlist.shape
    coord = coord.reshape([bsz, -1, 3])
    nall = coord.shape[1]
    mask = nlist >= 0
    # `nlist = nlist * mask` would contribute NaNs in Hessian calculation,
    # so padding entries are redirected to the last atom index instead.
    nlist = paddle.where(mask, nlist, nall - 1)
    coord_l = coord[:, :natoms].reshape([bsz, -1, 1, 3])
    index = nlist.reshape([bsz, -1]).unsqueeze(-1).expand([-1, -1, 3])
    # gather neighbor coordinates (decomp version of paddle.take_along_axis)
    coord_r = decomp.take_along_axis(coord, axis=1, indices=index)
    coord_r = coord_r.reshape([bsz, natoms, nnei, 3])
    diff = coord_r - coord_l
    # decomp version of paddle.linalg.norm
    length = decomp.norm(diff, axis=-1, keepdim=True)
    # shift masked (padding) neighbors away from zero distance
    length = length + (~mask.unsqueeze(-1)).astype(length.dtype)
    t0 = 1 / (length + protection)
    t1 = diff / (length + protection) ** 2
    weight = compute_smooth_weight(length, rcut_smth, rcut)
    weight = weight * mask.unsqueeze(-1).astype(weight.dtype)
    if radial_only:
        env_mat = t0 * weight
    else:
        env_mat = paddle.concat([t0.astype(t1.dtype), t1], axis=-1) * weight
    return env_mat, diff * mask.unsqueeze(-1).astype(diff.dtype), weight


def prod_env_mat(
    extended_coord,
    nlist,
    atype,
    mean,
    stddev,
    rcut: float,
    rcut_smth: float,
    radial_only: bool = False,
    protection: float = 0.0,
):
    """Generate smooth environment matrix from atom coordinates and other context.

    Args:
    - extended_coord: Copied atom coordinates with shape [nframes, nall*3].
    - nlist: Neighbor list with shape [nframes, nloc, nnei].
    - atype: Atom types with shape [nframes, nloc].
    - mean: Average value of descriptor per element type with shape [len(sec), nnei, 4 or 1].
    - stddev: Standard deviation of descriptor per element type with shape [len(sec), nnei, 4 or 1].
    - rcut: Cut-off radius.
    - rcut_smth: Smooth hyper-parameter for pair force & energy.
    - radial_only: Whether to return a full description or a radial-only descriptor.
    - protection: Protection parameter to prevent division by zero errors during calculations.

    Returns
    -------
    - env_mat: Shape is [nframes, natoms[1]*nnei*4].
    """
    _env_mat_se_a, diff, switch = _make_env_mat(
        nlist,
        extended_coord,
        rcut,
        rcut_smth,
        radial_only,
        protection=protection,
    )  # shape [n_atom, dim, 4 or 1]
    t_avg = mean[atype]  # [n_atom, dim, 4 or 1]
    t_std = stddev[atype]  # [n_atom, dim, 4 or 1]
    env_mat_se_a = (_env_mat_se_a - t_avg) / t_std
    return env_mat_se_a, diff, switch
+ """ + _env_mat_se_a, diff, switch = _make_env_mat( + nlist, + extended_coord, + rcut, + rcut_smth, + radial_only, + protection=protection, + ) # shape [n_atom, dim, 4 or 1] + t_avg = mean[atype] # [n_atom, dim, 4 or 1] + t_std = stddev[atype] # [n_atom, dim, 4 or 1] + env_mat_se_a = (_env_mat_se_a - t_avg) / t_std + return env_mat_se_a, diff, switch diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py new file mode 100644 index 0000000000..76ea32797f --- /dev/null +++ b/deepmd/pd/model/descriptor/se_a.py @@ -0,0 +1,680 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +from typing import ( + Callable, + ClassVar, + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor import ( + DescriptorBlock, + prod_env_mat, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +try: + from typing import ( + Final, + ) +except ImportError: + from paddle.jit import Final + +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, + NetworkCollection, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) + +from .base_descriptor import ( + BaseDescriptor, +) + + +@BaseDescriptor.register("se_e2_a") +@BaseDescriptor.register("se_a") +class DescrptSeA(BaseDescriptor, paddle.nn.Layer): + def __init__( + self, + rcut, + rcut_smth, + sel, + neuron=[25, 50, 100], + axis_neuron=16, + set_davg_zero: bool = False, + activation_function: str = "tanh", + 
precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + type_one_side: bool = True, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ntypes: Optional[int] = None, # to be compat with input + type_map: Optional[list[str]] = None, + # not implemented + spin=None, + ): + del ntypes + if spin is not None: + raise NotImplementedError("old implementation of spin is not supported.") + super().__init__() + self.type_map = type_map + self.sea = DescrptBlockSeA( + rcut, + rcut_smth, + sel, + neuron=neuron, + axis_neuron=axis_neuron, + set_davg_zero=set_davg_zero, + activation_function=activation_function, + precision=precision, + resnet_dt=resnet_dt, + exclude_types=exclude_types, + env_protection=env_protection, + type_one_side=type_one_side, + trainable=trainable, + seed=seed, + ) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.sea.get_rcut() + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.sea.get_rcut_smth() + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return self.sea.get_nsel() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sea.get_sel() + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.sea.get_ntypes() + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.sea.get_dim_out() + + def get_dim_emb(self) -> int: + """Returns the output dimension.""" + return self.sea.get_dim_emb() + + def mixed_types(self): + """Returns if the descriptor requires a neighbor list that distinguish different + atomic types or not. 
+ """ + return self.sea.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.sea.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return self.sea.need_sorted_nlist_for_lower() + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.sea.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For SeA descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in sea + if shared_level == 0: + self.sea.share_params(base_class.sea, 0, resume=resume) + # Other shared levels + else: + raise NotImplementedError + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.sea.dim_out + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e2_a does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." 
+ ) + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. + + """ + return self.sea.compute_input_stats(merged, path) + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ): + """Update the type exclusions.""" + self.sea.reinit_exclude(exclude_types) + + def forward( + self, + coord_ext: paddle.Tensor, + atype_ext: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + coord_ext + The extended coordinates of atoms. shape: nf x (nallx3) + atype_ext + The extended aotm types. shape: nf x nall + nlist + The neighbor list. shape: nf x nloc x nnei + mapping + The index mapping, not required by this descriptor. + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + this descriptor returns None + h2 + The rotationally equivariant pair-partical representation. 
+ this descriptor returns None + sw + The smooth switch function. + + """ + return self.sea.forward(nlist, coord_ext, atype_ext, None, mapping) + + def set_stat_mean_and_stddev( + self, + mean: paddle.Tensor, + stddev: paddle.Tensor, + ) -> None: + """Update mean and stddev for descriptor.""" + self.sea.mean = mean + self.sea.stddev = stddev + + def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]: + """Get mean and stddev for descriptor.""" + return self.sea.mean, self.sea.stddev + + def serialize(self) -> dict: + obj = self.sea + return { + "@class": "Descriptor", + "type": "se_e2_a", + "@version": 2, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "neuron": obj.neuron, + "axis_neuron": obj.axis_neuron, + "resnet_dt": obj.resnet_dt, + "set_davg_zero": obj.set_davg_zero, + "activation_function": obj.activation_function, + # make deterministic + "precision": RESERVED_PRECISON_DICT[obj.prec], + "embeddings": obj.filter_layers.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "@variables": { + "davg": obj["davg"].numpy(), + "dstd": obj["dstd"].numpy(), + }, + "type_map": self.type_map, + ## to be updated when the options are supported. 
+ "trainable": True, + "type_one_side": obj.type_one_side, + "spin": None, + } + + @classmethod + def deserialize(cls, data: dict) -> "DescrptSeA": + data = data.copy() + check_version_compatibility(data.pop("@version", 1), 2, 1) + data.pop("@class", None) + data.pop("type", None) + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + env_mat = data.pop("env_mat") + obj = cls(**data) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.sea.prec).to(device=env.DEVICE) + + obj.sea["davg"] = t_cvt(variables["davg"]) + obj.sea["dstd"] = t_cvt(variables["dstd"]) + obj.sea.filter_layers = NetworkCollection.deserialize(embeddings) + return obj + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + min_nbor_dist, local_jdata_cpy["sel"] = UpdateSel().update_one_sel( + train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], False + ) + return local_jdata_cpy, min_nbor_dist + + +@DescriptorBlock.register("se_e2_a") +class DescrptBlockSeA(DescriptorBlock): + ndescrpt: Final[int] + __constants__: ClassVar[list] = ["ndescrpt"] + + def __init__( + self, + rcut, + rcut_smth, + sel, + neuron=[25, 50, 100], + axis_neuron=16, + set_davg_zero: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + type_one_side: bool = True, + trainable: bool = True, + seed: 
@DescriptorBlock.register("se_e2_a")
class DescrptBlockSeA(DescriptorBlock):
    """The computational core of the ``se_e2_a`` descriptor."""

    ndescrpt: Final[int]
    __constants__: ClassVar[list] = ["ndescrpt"]

    def __init__(
        self,
        rcut,
        rcut_smth,
        sel,
        neuron=[25, 50, 100],
        axis_neuron=16,
        set_davg_zero: bool = False,
        activation_function: str = "tanh",
        precision: str = "float64",
        resnet_dt: bool = False,
        exclude_types: list[tuple[int, int]] = [],
        env_protection: float = 0.0,
        type_one_side: bool = True,
        trainable: bool = True,
        seed: Optional[Union[int, list[int]]] = None,
        **kwargs,
    ):
        """Construct an embedding net of type `se_a`.

        Args:
        - rcut: Cut-off radius.
        - rcut_smth: Smooth hyper-parameter for pair force & energy.
        - sel: For each element type, how many atoms is selected as neighbors.
        - filter_neuron: Number of neurons in each hidden layers of the embedding net.
        - axis_neuron: Number of columns of the sub-matrix of the embedding matrix.
        """
        super().__init__()
        self.rcut = rcut
        self.rcut_smth = rcut_smth
        self.neuron = neuron
        self.filter_neuron = self.neuron
        self.axis_neuron = axis_neuron
        self.set_davg_zero = set_davg_zero
        self.activation_function = activation_function
        self.precision = precision
        self.prec = PRECISION_DICT[self.precision]
        self.resnet_dt = resnet_dt
        self.env_protection = env_protection
        self.ntypes = len(sel)
        self.type_one_side = type_one_side
        self.seed = seed
        # order matters: must come after the assignment of self.ntypes
        self.reinit_exclude(exclude_types)

        self.sel = sel
        # kept on CPU to avoid a D2H copy, as it is used as slice index
        self.sec = [0, *np.cumsum(self.sel).tolist()]
        self.split_sel = self.sel
        self.nnei = sum(sel)
        self.ndescrpt = self.nnei * 4

        # running statistics buffers (per type, per neighbor, 4 components)
        wanted_shape = (self.ntypes, self.nnei, 4)
        mean = paddle.zeros(wanted_shape, dtype=self.prec).to(device=env.DEVICE)
        stddev = paddle.ones(wanted_shape, dtype=self.prec).to(device=env.DEVICE)
        self.register_buffer("mean", mean)
        self.register_buffer("stddev", stddev)

        # one embedding net per neighbor type (or per (center, neighbor) pair
        # when type_one_side is False)
        ndim = 1 if self.type_one_side else 2
        filter_layers = NetworkCollection(
            ndim=ndim, ntypes=len(sel), network_type="embedding_network"
        )
        for net_idx, embedding_idx in enumerate(
            itertools.product(range(self.ntypes), repeat=ndim)
        ):
            filter_layers[embedding_idx] = EmbeddingNet(
                1,
                self.filter_neuron,
                activation_function=self.activation_function,
                precision=self.precision,
                resnet_dt=self.resnet_dt,
                seed=child_seed(self.seed, net_idx),
            )
        self.filter_layers = filter_layers
        self.stats = None
        # set trainable
        for param in self.parameters():
            param.stop_gradient = not trainable

    def get_rcut(self) -> float:
        """Returns the cut-off radius."""
        return self.rcut

    def get_rcut_smth(self) -> float:
        """Returns the radius where the neighbor information starts to smoothly decay to 0."""
        return self.rcut_smth

    def get_nsel(self) -> int:
        """Returns the number of selected atoms in the cut-off radius."""
        return sum(self.sel)

    def get_sel(self) -> list[int]:
        """Returns the number of selected atoms for each type."""
        return self.sel

    def get_ntypes(self) -> int:
        """Returns the number of element types."""
        return self.ntypes

    def get_dim_out(self) -> int:
        """Returns the output dimension."""
        return self.dim_out

    def get_dim_emb(self) -> int:
        """Returns the embedding dimension (width of the last filter layer)."""
        return self.neuron[-1]

    def get_dim_in(self) -> int:
        """Returns the input dimension."""
        return self.dim_in

    def mixed_types(self) -> bool:
        """If true, the discriptor
        1. assumes total number of atoms aligned across frames;
        2. requires a neighbor list that does not distinguish different atomic types.

        If false, the discriptor
        1. assumes total number of atoms of each atom type aligned across frames;
        2. requires a neighbor list that distinguishes different atomic types.
        """
        return False

    def get_env_protection(self) -> float:
        """Returns the protection of building environment matrix."""
        return self.env_protection

    @property
    def dim_out(self):
        """Returns the output dimension of this descriptor."""
        return self.filter_neuron[-1] * self.axis_neuron

    @property
    def dim_in(self):
        """Returns the atomic input dimension of this descriptor."""
        return 0

    def __setitem__(self, key, value):
        if key in ("avg", "data_avg", "davg"):
            self.mean = value
        elif key in ("std", "data_std", "dstd"):
            self.stddev = value
        else:
            raise KeyError(key)

    def __getitem__(self, key):
        if key in ("avg", "data_avg", "davg"):
            return self.mean
        elif key in ("std", "data_std", "dstd"):
            return self.stddev
        else:
            raise KeyError(key)

    def compute_input_stats(
        self,
        merged: Union[Callable[[], list[dict]], list[dict]],
        path: Optional[DPPath] = None,
    ):
        """
        Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.

        Parameters
        ----------
        merged : Union[Callable[[], list[dict]], list[dict]]
            - list[dict]: A list of data samples from various data systems.
                Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor`
                originating from the `i`-th data system.
            - Callable[[], list[dict]]: A lazy function that returns data samples in the above format
                only when needed. Since the sampling process can be slow and memory-intensive,
                the lazy function helps by only sampling once.
        path : Optional[DPPath]
            The path to the stat file.
        """
        env_mat_stat = EnvMatStatSe(self)
        if path is not None:
            path = path / env_mat_stat.get_hash()
        if path is None or not path.is_dir():
            # stats not cached on disk: sample the data (lazily, at most once)
            sampled = merged() if callable(merged) else merged
        else:
            sampled = []
        env_mat_stat.load_or_compute_stats(sampled, path)
        self.stats = env_mat_stat.stats
        mean, stddev = env_mat_stat()
        if not self.set_davg_zero:
            paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean)  # pylint: disable=no-explicit-dtype
            paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev)  # pylint: disable=no-explicit-dtype

    def get_stats(self) -> dict[str, StatItem]:
        """Get the statistics of the descriptor."""
        if self.stats is None:
            raise RuntimeError(
                "The statistics of the descriptor has not been computed."
            )
        return self.stats

    def reinit_exclude(
        self,
        exclude_types: list[tuple[int, int]] = [],
    ):
        """Rebuild the pair exclusion mask from the given type pairs."""
        self.exclude_types = exclude_types
        self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types)

    def forward(
        self,
        nlist: paddle.Tensor,
        extended_coord: paddle.Tensor,
        extended_atype: paddle.Tensor,
        extended_atype_embd: Optional[paddle.Tensor] = None,
        mapping: Optional[paddle.Tensor] = None,
    ):
        """Calculate decoded embedding for each atom.

        Args:
        - nlist: Neighbor list with shape [nframes, nloc, nnei].
        - extended_coord: Extended coordinates.
        - extended_atype: Extended atom types.
        - extended_atype_embd, mapping: unused by this block.

        Returns
        -------
        - `paddle.Tensor`: descriptor matrix with shape [nframes, natoms[0]*self.filter_neuron[-1]*self.axis_neuron].
        """
        del extended_atype_embd, mapping
        nf, nloc = nlist.shape[0], nlist.shape[1]
        atype: paddle.Tensor = extended_atype[:, :nloc]
        dmatrix, diff, sw = prod_env_mat(
            extended_coord,
            nlist,
            atype,
            self.mean,
            self.stddev,
            self.rcut,
            self.rcut_smth,
            protection=self.env_protection,
        )

        assert self.filter_layers is not None
        dmatrix = dmatrix.reshape([-1, self.nnei, 4]).astype(self.prec)
        nfnl = dmatrix.shape[0]
        # pre-allocate a shape to pass jit
        xyz_scatter = paddle.zeros(
            [nfnl, 4, self.filter_neuron[-1]],
            dtype=self.prec,
        ).to(extended_coord.place)
        # nfnl x nnei
        exclude_mask = self.emask(nlist, extended_atype).reshape([nfnl, self.nnei])
        for embedding_idx, net in enumerate(self.filter_layers.networks):
            if self.type_one_side:
                ii = embedding_idx
                # paddle.jit is not happy with slice(None); a full boolean
                # mask seems to cause performance degradation, so use None
                ti_mask = None
            else:
                # ii: neighbor type, ti: center atom type
                ii = embedding_idx // self.ntypes
                ti = embedding_idx % self.ntypes
                ti_mask = atype.flatten() == ti
            # nfnl x nt
            if ti_mask is not None:
                mm = exclude_mask[ti_mask, self.sec[ii] : self.sec[ii + 1]]
            else:
                mm = exclude_mask[:, self.sec[ii] : self.sec[ii + 1]]
            # nfnl x nt x 4
            if ti_mask is not None:
                rr = dmatrix[ti_mask, self.sec[ii] : self.sec[ii + 1], :]
            else:
                rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :]
            if rr.numel() > 0:
                rr = rr * mm.unsqueeze(2).astype(rr.dtype)
                ss = rr[:, :, :1]
                # nfnl x nt x ng
                gg = net.forward(ss)
                # nfnl x 4 x ng
                gr = paddle.matmul(rr.transpose([0, 2, 1]), gg)
                if ti_mask is not None:
                    xyz_scatter[ti_mask] += gr
                else:
                    xyz_scatter += gr

        xyz_scatter /= self.nnei
        xyz_scatter_1 = xyz_scatter.transpose([0, 2, 1])
        rot_mat: paddle.Tensor = xyz_scatter_1[:, :, 1:4]
        xyz_scatter_2 = xyz_scatter[:, :, 0 : self.axis_neuron]
        # shape is [nframes*nall, self.filter_neuron[-1], self.axis_neuron]
        result = paddle.matmul(xyz_scatter_1, xyz_scatter_2)
        result = result.reshape([nf, nloc, self.filter_neuron[-1] * self.axis_neuron])
        rot_mat = rot_mat.reshape([nf, nloc] + list(rot_mat.shape[1:]))  # noqa:RUF005
        return (
            result.astype(env.GLOBAL_PD_FLOAT_PRECISION),
            rot_mat.astype(env.GLOBAL_PD_FLOAT_PRECISION),
            None,
            None,
            sw,
        )

    def has_message_passing(self) -> bool:
        """Returns whether the descriptor block has message passing."""
        return False

    def need_sorted_nlist_for_lower(self) -> bool:
        """Returns whether the descriptor block needs sorted nlist when using `forward_lower`."""
        return False
The models are automatically generated from +atomic models by the `deepmd.dpmodel.make_model` method. + +The `make_model` method does the reduction, auto-differentiation and +communication of the atomic properties according to output variable +definition `deepmd.dpmodel.OutputVariableDef`. + +All models should be inherited from :class:`deepmd.pd.model.model.model.BaseModel`. +Models generated by `make_model` have already done it. +""" + +import copy +import json + +import numpy as np + +from deepmd.pd.model.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.pd.model.task import ( + BaseFitting, +) + +from .dp_model import ( + DPModelCommon, +) +from .ener_model import ( + EnergyModel, +) +from .frozen import ( + FrozenModel, +) +from .make_model import ( + make_model, +) +from .model import ( + BaseModel, +) + + +def _get_standard_model_components(model_params, ntypes): + # descriptor + model_params["descriptor"]["ntypes"] = ntypes + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) + descriptor = BaseDescriptor(**model_params["descriptor"]) + # fitting + fitting_net = model_params.get("fitting_net", {}) + fitting_net["type"] = fitting_net.get("type", "ener") + fitting_net["ntypes"] = descriptor.get_ntypes() + fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) + fitting_net["mixed_types"] = descriptor.mixed_types() + if fitting_net["type"] in ["dipole", "polar"]: + fitting_net["embedding_width"] = descriptor.get_dim_emb() + fitting_net["dim_descrpt"] = descriptor.get_dim_out() + grad_force = "direct" not in fitting_net["type"] + if not grad_force: + fitting_net["out_dim"] = descriptor.get_dim_emb() + if "ener" in fitting_net["type"]: + fitting_net["return_energy"] = True + fitting = BaseFitting(**fitting_net) + return descriptor, fitting, fitting_net["type"] + + +def _can_be_converted_to_float(value): + try: + float(value) + return True + except (TypeError, ValueError): + # return false for any 
failure... + return False + + +def _convert_preset_out_bias_to_array(preset_out_bias, type_map): + if preset_out_bias is not None: + for kk in preset_out_bias: + if len(preset_out_bias[kk]) != len(type_map): + raise ValueError( + "length of the preset_out_bias should be the same as the type_map" + ) + for jj in range(len(preset_out_bias[kk])): + if preset_out_bias[kk][jj] is not None: + if isinstance(preset_out_bias[kk][jj], list): + bb = preset_out_bias[kk][jj] + elif _can_be_converted_to_float(preset_out_bias[kk][jj]): + bb = [float(preset_out_bias[kk][jj])] + else: + raise ValueError( + f"unsupported type/value of the {jj}th element of " + f"preset_out_bias['{kk}'] " + f"{type(preset_out_bias[kk][jj])}" + ) + preset_out_bias[kk][jj] = np.array(bb) + return preset_out_bias + + +def get_standard_model(model_params): + model_params_old = model_params + model_params = copy.deepcopy(model_params) + ntypes = len(model_params["type_map"]) + descriptor, fitting, fitting_net_type = _get_standard_model_components( + model_params, ntypes + ) + atom_exclude_types = model_params.get("atom_exclude_types", []) + pair_exclude_types = model_params.get("pair_exclude_types", []) + preset_out_bias = model_params.get("preset_out_bias") + preset_out_bias = _convert_preset_out_bias_to_array( + preset_out_bias, model_params["type_map"] + ) + + if fitting_net_type in ["ener", "direct_force_ener"]: + modelcls = EnergyModel + else: + raise RuntimeError(f"Unknown fitting type: {fitting_net_type}") + + model = modelcls( + descriptor=descriptor, + fitting=fitting, + type_map=model_params["type_map"], + atom_exclude_types=atom_exclude_types, + pair_exclude_types=pair_exclude_types, + preset_out_bias=preset_out_bias, + ) + model.model_def_script = json.dumps(model_params_old) + return model + + +def get_model(model_params): + model_type = model_params.get("type", "standard") + if model_type == "standard": + return get_standard_model(model_params) + else: + return 
BaseModel.get_class_by_type(model_type).get_model(model_params) + + +__all__ = [ + "BaseModel", + "get_model", + "DPModelCommon", + "EnergyModel", + "FrozenModel", + "make_model", +] diff --git a/deepmd/pd/model/model/dp_model.py b/deepmd/pd/model/model/dp_model.py new file mode 100644 index 0000000000..1e1cee6826 --- /dev/null +++ b/deepmd/pd/model/model/dp_model.py @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +from deepmd.pd.model.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + + +class DPModelCommon: + """A base class to implement common methods for all the Models.""" + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + local_jdata_cpy["descriptor"], min_nbor_dist = BaseDescriptor.update_sel( + train_data, type_map, local_jdata["descriptor"] + ) + return local_jdata_cpy, min_nbor_dist + + def get_fitting_net(self): + """Get the fitting network.""" + return self.atomic_model.fitting_net + + def get_descriptor(self): + """Get the descriptor.""" + return self.atomic_model.descriptor diff --git a/deepmd/pd/model/model/ener_model.py b/deepmd/pd/model/model/ener_model.py new file mode 100644 index 0000000000..3f3db4a527 --- /dev/null +++ b/deepmd/pd/model/model/ener_model.py @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from copy import ( + deepcopy, +) +from typing import ( + 
Optional, +) + +import paddle + +from deepmd.pd.model.atomic_model import ( + DPEnergyAtomicModel, +) +from deepmd.pd.model.model.model import ( + BaseModel, +) + +from .dp_model import ( + DPModelCommon, +) +from .make_model import ( + make_model, +) + +DPEnergyModel_ = make_model(DPEnergyAtomicModel) + + +@BaseModel.register("ener") +class EnergyModel(DPModelCommon, DPEnergyModel_): + model_type = "ener" + + def __init__( + self, + *args, + **kwargs, + ): + DPModelCommon.__init__(self) + DPEnergyModel_.__init__(self, *args, **kwargs) + + def translated_output_def(self): + out_def_data = self.model_output_def().get_data() + output_def = { + "atom_energy": deepcopy(out_def_data["energy"]), + "energy": deepcopy(out_def_data["energy_redu"]), + } + if self.do_grad_r("energy"): + output_def["force"] = deepcopy(out_def_data["energy_derv_r"]) + output_def["force"].squeeze(-2) + if self.do_grad_c("energy"): + output_def["virial"] = deepcopy(out_def_data["energy_derv_c_redu"]) + output_def["virial"].squeeze(-2) + output_def["atom_virial"] = deepcopy(out_def_data["energy_derv_c"]) + output_def["atom_virial"].squeeze(-3) + if "mask" in out_def_data: + output_def["mask"] = deepcopy(out_def_data["mask"]) + return output_def + + def forward( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + model_ret = self.forward_common( + coord, + atype, + box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + if self.get_fitting_net() is not None: + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad_r("energy"): + model_predict["force"] = model_ret["energy_derv_r"].squeeze(-2) + if self.do_grad_c("energy"): + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2) + if do_atomic_virial: + 
model_predict["atom_virial"] = model_ret["energy_derv_c"].squeeze( + -3 + ) + else: + model_predict["force"] = model_ret["dforce"] + if "mask" in model_ret: + model_predict["mask"] = model_ret["mask"] + else: + model_predict = model_ret + model_predict["updated_coord"] += coord + return model_predict + + def forward_lower( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + model_ret = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + comm_dict=comm_dict, + extra_nlist_sort=self.need_sorted_nlist_for_lower(), + ) + if self.get_fitting_net() is not None: + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad_r("energy"): + model_predict["extended_force"] = model_ret["energy_derv_r"].squeeze(-2) + if self.do_grad_c("energy"): + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2) + if do_atomic_virial: + model_predict["extended_virial"] = model_ret[ + "energy_derv_c" + ].squeeze(-3) + else: + assert model_ret["dforce"] is not None + model_predict["dforce"] = model_ret["dforce"] + else: + model_predict = model_ret + return model_predict diff --git a/deepmd/pd/model/model/frozen.py b/deepmd/pd/model/model/frozen.py new file mode 100644 index 0000000000..209cfca9c8 --- /dev/null +++ b/deepmd/pd/model/model/frozen.py @@ -0,0 +1,182 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +from typing import ( + Optional, +) + +import paddle + +from deepmd.dpmodel.output_def import ( + FittingOutputDef, +) +from deepmd.pd.model.model.model import ( + BaseModel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + + 
+@BaseModel.register("frozen") +class FrozenModel(BaseModel): + """Load model from a frozen model, which cannot be trained. + + Parameters + ---------- + model_file : str + The path to the frozen model + """ + + def __init__(self, model_file: str, **kwargs): + super().__init__(**kwargs) + self.model_file = model_file + if model_file.endswith(".json"): + self.model = paddle.jit.load(model_file.split(".json")[0]) + else: + raise NotImplementedError( + "Only support .json file, " f"but received {model_file}" + ) + + def fitting_output_def(self) -> FittingOutputDef: + """Get the output def of developer implemented atomic models.""" + return self.model.fitting_output_def() + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.model.get_rcut() + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.model.get_type_map() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.model.get_sel() + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.model.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.model.get_dim_aparam() + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.model.get_sel_type() + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). + """ + return self.model.is_aparam_nall() + + def mixed_types(self) -> bool: + """If true, the model + 1. assumes total number of atoms aligned across frames; + 2. uses a neighbor list that does not distinguish different atomic types. 
+ + If false, the model + 1. assumes total number of atoms of each atom type aligned across frames; + 2. uses a neighbor list that distinguishes different atomic types. + + """ + return self.model.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.model.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the model needs sorted nlist when using `forward_lower`.""" + return self.model.need_sorted_nlist_for_lower() + + def forward( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + return self.model.forward( + coord, + atype, + box=box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + def get_model_def_script(self) -> str: + """Get the model definition script.""" + # try to use the original script instead of "frozen model" + # Note: this cannot change the script of the parent model + # it may still try to load hard-coded filename, which might + # be a problem + return self.model.get_model_def_script() + + def get_min_nbor_dist(self) -> Optional[float]: + """Get the minimum neighbor distance.""" + return self.model.get_min_nbor_dist() + + def serialize(self) -> dict: + from deepmd.pd.model.model import ( + get_model, + ) + + # try to recover the original model + model_def_script = json.loads(self.get_model_def_script()) + model = get_model(model_def_script) + model.set_state_dict(self.model.state_dict()) + return model.serialize() + + @classmethod + def deserialize(cls, data: dict): + raise RuntimeError("Should not touch here.") + + def get_nnei(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.model.get_nnei() + + def get_nsel(self) -> int: + """Returns the total number of selected 
neighboring atoms in the cut-off radius.""" + return self.model.get_nsel() + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + return local_jdata, None + + def model_output_type(self) -> str: + """Get the output type for the model.""" + return self.model.model_output_type() diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py new file mode 100644 index 0000000000..258ba5d2fc --- /dev/null +++ b/deepmd/pd/model/model/make_model.py @@ -0,0 +1,587 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle + +from deepmd.dpmodel import ( + ModelOutputDef, +) +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableCategory, + OutputVariableOperation, + check_operation_applied, +) +from deepmd.pd.model.atomic_model.base_atomic_model import ( + BaseAtomicModel, +) +from deepmd.pd.model.model.model import ( + BaseModel, +) +from deepmd.pd.model.model.transform_output import ( + communicate_extended_output, + fit_output_to_model_output, +) +from deepmd.pd.utils import ( + decomp, +) +from deepmd.pd.utils.env import ( + GLOBAL_PD_ENER_FLOAT_PRECISION, + GLOBAL_PD_FLOAT_PRECISION, + PRECISION_DICT, + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, + nlist_distinguish_types, +) +from deepmd.utils.path import ( + DPPath, +) + + +def make_model(T_AtomicModel: type[BaseAtomicModel]): + """Make a model as a derived class of an 
atomic model.
+
+    The model provides two interfaces.
+
+    1. the `forward_common_lower`, that takes extended coordinates, atypes and neighbor list,
+    and outputs the atomic property and derivatives (if required) on the extended region.
+
+    2. the `forward_common`, that takes coordinates, atypes and cell and predicts
+    the atomic and reduced property, and derivatives (if required) on the local region.
+
+    Parameters
+    ----------
+    T_AtomicModel
+        The atomic model.
+
+    Returns
+    -------
+    CM
+        The model.
+
+    """
+
+    class CM(BaseModel):
+        def __init__(
+            self,
+            *args,
+            # underscore to prevent conflict with normal inputs
+            atomic_model_: Optional[T_AtomicModel] = None,
+            **kwargs,
+        ):
+            super().__init__(*args, **kwargs)
+            if atomic_model_ is not None:
+                self.atomic_model: T_AtomicModel = atomic_model_
+            else:
+                self.atomic_model: T_AtomicModel = T_AtomicModel(*args, **kwargs)
+            self.precision_dict = PRECISION_DICT
+            self.reverse_precision_dict = RESERVED_PRECISON_DICT
+            self.global_pd_float_precision = GLOBAL_PD_FLOAT_PRECISION
+            self.global_pd_ener_float_precision = GLOBAL_PD_ENER_FLOAT_PRECISION
+
+        def model_output_def(self):
+            """Get the output def for the model."""
+            return ModelOutputDef(self.atomic_output_def())
+
+        def model_output_type(self) -> list[str]:
+            """Get the output type for the model."""
+            output_def = self.model_output_def()
+            var_defs = output_def.var_defs
+            # jit: Comprehension ifs are not supported yet
+            # type hint is critical for JIT
+            vars: list[str] = []
+            for kk, vv in var_defs.items():
+                # .value is critical for JIT
+                if vv.category == OutputVariableCategory.OUT.value:
+                    vars.append(kk)
+            return vars
+
+        # cannot use the name forward. paddle script does not work
+        def forward_common(
+            self,
+            coord,
+            atype,
+            box: Optional[paddle.Tensor] = None,
+            fparam: Optional[paddle.Tensor] = None,
+            aparam: Optional[paddle.Tensor] = None,
+            do_atomic_virial: bool = False,
+        ) -> dict[str, paddle.Tensor]:
+            """Return model prediction.
+ + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type Dict[str,paddle.Tensor]. + The keys are defined by the `ModelOutputDef`. + + """ + cc, bb, fp, ap, input_prec = self.input_type_cast( + coord, box=box, fparam=fparam, aparam=aparam + ) + del coord, box, fparam, aparam + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + cc, + atype, + self.get_rcut(), + self.get_sel(), + mixed_types=self.mixed_types(), + box=bb, + ) + model_predict_lower = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping, + do_atomic_virial=do_atomic_virial, + fparam=fp, + aparam=ap, + ) + model_predict = communicate_extended_output( + model_predict_lower, + self.model_output_def(), + mapping, + do_atomic_virial=do_atomic_virial, + ) + model_predict = self.output_type_cast(model_predict, input_prec) + return model_predict + + def get_out_bias(self) -> paddle.Tensor: + return self.atomic_model.get_out_bias() + + def set_out_bias(self, out_bias: paddle.Tensor) -> None: + self.atomic_model.set_out_bias(out_bias) + + def change_out_bias( + self, + merged, + bias_adjust_mode="change-by-statistic", + ) -> None: + """Change the output bias of atomic model according to the input data and the pretrained model. + + Parameters + ---------- + merged : Union[Callable[[], List[dict]], List[dict]] + - List[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + only when needed. 
Since the sampling process can be slow and memory-intensive,
+                    the lazy function helps by only sampling once.
+            bias_adjust_mode : str
+                The mode for changing output bias : ['change-by-statistic', 'set-by-statistic']
+                'change-by-statistic' : perform predictions on labels of target dataset,
+                and do least square on the errors to obtain the target shift as bias.
+                'set-by-statistic' : directly use the statistic output bias in the target dataset.
+            """
+            self.atomic_model.change_out_bias(
+                merged,
+                bias_adjust_mode=bias_adjust_mode,
+            )
+
+        def forward_common_lower(
+            self,
+            extended_coord,
+            extended_atype,
+            nlist,
+            mapping: Optional[paddle.Tensor] = None,
+            fparam: Optional[paddle.Tensor] = None,
+            aparam: Optional[paddle.Tensor] = None,
+            do_atomic_virial: bool = False,
+            comm_dict: Optional[dict[str, paddle.Tensor]] = None,
+            extra_nlist_sort: bool = False,
+        ):
+            """Return model prediction. Lower interface that takes
+            extended atomic coordinates and types, nlist, and mapping
+            as input, and returns the predictions on the extended region.
+            The predictions are not reduced.
+
+            Parameters
+            ----------
+            extended_coord
+                coordinates in extended region. nf x (nall x 3)
+            extended_atype
+                atomic type in extended region. nf x nall
+            nlist
+                neighbor list. nf x nloc x nsel.
+            mapping
+                maps the extended indices to local indices. nf x nall.
+            fparam
+                frame parameter. nf x ndf
+            aparam
+                atomic parameter. nf x nloc x nda
+            do_atomic_virial
+                whether to calculate the atomic virial.
+            comm_dict
+                The data needed for communication for parallel inference.
+            extra_nlist_sort
+                whether to forcibly sort the nlist.
+
+            Returns
+            -------
+            result_dict
+                the result dict, defined by the `FittingOutputDef`.
+ + """ + nframes, nall = extended_atype.shape[:2] + extended_coord = extended_coord.reshape([nframes, -1, 3]) + nlist = self.format_nlist( + extended_coord, extended_atype, nlist, extra_nlist_sort=extra_nlist_sort + ) + cc_ext, _, fp, ap, input_prec = self.input_type_cast( + extended_coord, fparam=fparam, aparam=aparam + ) + del extended_coord, fparam, aparam + atomic_ret = self.atomic_model.forward_common_atomic( + cc_ext, + extended_atype, + nlist, + mapping=mapping, + fparam=fp, + aparam=ap, + comm_dict=comm_dict, + ) + model_predict = fit_output_to_model_output( + atomic_ret, + self.atomic_output_def(), + cc_ext, + do_atomic_virial=do_atomic_virial, + create_graph=self.training, + ) + model_predict = self.output_type_cast(model_predict, input_prec) + return model_predict + + def input_type_cast( + self, + coord: paddle.Tensor, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ) -> tuple[ + paddle.Tensor, + Optional[paddle.Tensor], + Optional[paddle.Tensor], + Optional[paddle.Tensor], + str, + ]: + """Cast the input data to global float type.""" + input_prec = self.reverse_precision_dict[coord.dtype] + ### + ### type checking would not pass jit, convert to coord prec anyway + ### + # for vv, kk in zip([fparam, aparam], ["frame", "atomic"]): + # if vv is not None and self.reverse_precision_dict[vv.dtype] != input_prec: + # log.warning( + # f"type of {kk} parameter {self.reverse_precision_dict[vv.dtype]}" + # " does not match" + # f" that of the coordinate {input_prec}" + # ) + _lst: list[Optional[paddle.Tensor]] = [ + vv.astype(coord.dtype) if vv is not None else None + for vv in [box, fparam, aparam] + ] + box, fparam, aparam = _lst + if ( + input_prec + == self.reverse_precision_dict[self.global_pd_float_precision] + ): + return coord, box, fparam, aparam, input_prec + else: + pp = self.global_pd_float_precision + return ( + coord.to(pp), + box.to(pp) if box is not None else None, + 
fparam.to(pp) if fparam is not None else None,
+                    aparam.to(pp) if aparam is not None else None,
+                    input_prec,
+                )
+
+        def output_type_cast(
+            self,
+            model_ret: dict[str, paddle.Tensor],
+            input_prec: str,
+        ) -> dict[str, paddle.Tensor]:
+            """Convert the model output to the input prec."""
+            do_cast = (
+                input_prec
+                != self.reverse_precision_dict[self.global_pd_float_precision]
+            )
+            pp = self.precision_dict[input_prec]
+            odef = self.model_output_def()
+            for kk in odef.keys():
+                if kk not in model_ret.keys():
+                    # do not return energy_derv_c if not do_atomic_virial
+                    continue
+                if check_operation_applied(odef[kk], OutputVariableOperation.REDU):
+                    model_ret[kk] = (
+                        model_ret[kk].to(self.global_pd_ener_float_precision)
+                        if model_ret[kk] is not None
+                        else None
+                    )
+                elif do_cast:
+                    model_ret[kk] = (
+                        model_ret[kk].to(pp) if model_ret[kk] is not None else None
+                    )
+            return model_ret
+
+        def format_nlist(
+            self,
+            extended_coord: paddle.Tensor,
+            extended_atype: paddle.Tensor,
+            nlist: paddle.Tensor,
+            extra_nlist_sort: bool = False,
+        ):
+            """Format the neighbor list.
+
+            1. If the number of neighbors in the `nlist` is equal to sum(self.sel),
+            it does nothing
+
+            2. If the number of neighbors in the `nlist` is smaller than sum(self.sel),
+            the `nlist` is padded with -1.
+
+            3. If the number of neighbors in the `nlist` is larger than sum(self.sel),
+            the nearest sum(sel) neighbors will be preserved.
+
+            Known limitations:
+
+            In the case of not self.mixed_types, the nlist is always formatted.
+            May have side effect on the efficiency.
+
+            Parameters
+            ----------
+            extended_coord
+                coordinates in extended region. nf x nall x 3
+            extended_atype
+                atomic type in extended region. nf x nall
+            nlist
+                neighbor list. nf x nloc x nsel
+            extra_nlist_sort
+                whether to forcibly sort the nlist.
+
+            Returns
+            -------
+            formatted_nlist
+                the formatted nlist.
+ + """ + mixed_types = self.mixed_types() + nlist = self._format_nlist( + extended_coord, + nlist, + sum(self.get_sel()), + extra_nlist_sort=extra_nlist_sort, + ) + if not mixed_types: + nlist = nlist_distinguish_types(nlist, extended_atype, self.get_sel()) + return nlist + + def _format_nlist( + self, + extended_coord: paddle.Tensor, + nlist: paddle.Tensor, + nnei: int, + extra_nlist_sort: bool = False, + ): + n_nf, n_nloc, n_nnei = nlist.shape + # nf x nall x 3 + extended_coord = extended_coord.reshape([n_nf, -1, 3]) + rcut = self.get_rcut() + + if n_nnei < nnei: + nlist = paddle.concat( + [ + nlist, + -1 + * paddle.ones( + [n_nf, n_nloc, nnei - n_nnei], + dtype=nlist.dtype, + ), + ], + axis=-1, + ) + + if n_nnei > nnei or extra_nlist_sort: + n_nf, n_nloc, n_nnei = nlist.shape + m_real_nei = nlist >= 0 + nlist = paddle.where(m_real_nei, nlist, paddle.zeros_like(nlist)) + # nf x nloc x 3 + coord0 = extended_coord[:, :n_nloc, :] + # nf x (nloc x nnei) x 3 + index = nlist.reshape([n_nf, n_nloc * n_nnei, 1]).expand([-1, -1, 3]) + coord1 = decomp.take_along_axis(extended_coord, axis=1, indices=index) + # nf x nloc x nnei x 3 + coord1 = coord1.reshape([n_nf, n_nloc, n_nnei, 3]) + # nf x nloc x nnei + # rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = decomp.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = paddle.where(m_real_nei, rr, float("inf")) + rr, nlist_mapping = ( + paddle.sort(rr, axis=-1), + paddle.argsort(rr, axis=-1), + ) + nlist = decomp.take_along_axis(nlist, axis=2, indices=nlist_mapping) + nlist = paddle.where(rr > rcut, paddle.full_like(nlist, -1), nlist) + nlist = nlist[..., :nnei] + else: # not extra_nlist_sort and n_nnei <= nnei: + pass # great! + assert nlist.shape[-1] == nnei + return nlist + + def do_grad_r( + self, + var_name: Optional[str] = None, + ) -> bool: + """Tell if the output variable `var_name` is r_differentiable. + if var_name is None, returns if any of the variable is r_differentiable. 
+ """ + return self.atomic_model.do_grad_r(var_name) + + def do_grad_c( + self, + var_name: Optional[str] = None, + ) -> bool: + """Tell if the output variable `var_name` is c_differentiable. + if var_name is None, returns if any of the variable is c_differentiable. + """ + return self.atomic_model.do_grad_c(var_name) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + self.atomic_model.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.atomic_model + if model_with_new_type_stat is not None + else None, + ) + + def serialize(self) -> dict: + return self.atomic_model.serialize() + + @classmethod + def deserialize(cls, data) -> "CM": + return cls(atomic_model_=T_AtomicModel.deserialize(data)) + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.atomic_model.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.atomic_model.get_dim_aparam() + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.atomic_model.get_sel_type() + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). 
+ """ + return self.atomic_model.is_aparam_nall() + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.atomic_model.get_rcut() + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.atomic_model.get_type_map() + + def get_nsel(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.atomic_model.get_nsel() + + def get_nnei(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.atomic_model.get_nnei() + + def atomic_output_def(self) -> FittingOutputDef: + """Get the output def of the atomic model.""" + return self.atomic_model.atomic_output_def() + + def compute_or_load_stat( + self, + sampled_func, + stat_file_path: Optional[DPPath] = None, + ): + """Compute or load the statistics.""" + return self.atomic_model.compute_or_load_stat(sampled_func, stat_file_path) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.atomic_model.get_sel() + + def mixed_types(self) -> bool: + """If true, the model + 1. assumes total number of atoms aligned across frames; + 2. uses a neighbor list that does not distinguish different atomic types. + + If false, the model + 1. assumes total number of atoms of each atom type aligned across frames; + 2. uses a neighbor list that distinguishes different atomic types. 
+ + """ + return self.atomic_model.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the model has message passing.""" + return self.atomic_model.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the model needs sorted nlist when using `forward_lower`.""" + return self.atomic_model.need_sorted_nlist_for_lower() + + def forward( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + # directly call the forward_common method when no specific transform rule + return self.forward_common( + coord, + atype, + box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + return CM diff --git a/deepmd/pd/model/model/model.py b/deepmd/pd/model/model/model.py new file mode 100644 index 0000000000..06a2c6910f --- /dev/null +++ b/deepmd/pd/model/model/model.py @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle + +from deepmd.dpmodel.model.base_model import ( + make_base_model, +) +from deepmd.utils.path import ( + DPPath, +) + + +class BaseModel(paddle.nn.Layer, make_base_model()): + def __init__(self, *args, **kwargs): + """Construct a basic model for different tasks.""" + paddle.nn.Layer.__init__(self) + self.model_def_script = "" + self.min_nbor_dist = None + + def compute_or_load_stat( + self, + sampled_func, + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute or load the statistics parameters of the model, + such as mean and standard deviation of descriptors or the energy bias of the fitting net. + When `sampled` is provided, all the statistics parameters will be calculated (or re-calculated for update), + and saved in the `stat_file_path`(s). 
+        When `sampled_func` is not provided, it will check the existence of `stat_file_path`(s)
+ coord = coord.detach() + ce = coord * atom_energy + sumce0, sumce1, sumce2 = paddle.split(paddle.sum(ce, axis=1), [1, 1, 1], axis=-1) + faked_grad = paddle.ones_like(sumce0) + # lst = paddle.jit.annotate(List[Optional[paddle.Tensor]], [faked_grad]) + extended_virial_corr0 = paddle.autograd.grad( + [sumce0], + [extended_coord], + # grad_outputs=lst, + create_graph=False, + retain_graph=True, + )[0] + assert extended_virial_corr0 is not None + extended_virial_corr1 = paddle.autograd.grad( + [sumce1], + [extended_coord], + # grad_outputs=lst, + create_graph=False, + retain_graph=True, + )[0] + assert extended_virial_corr1 is not None + extended_virial_corr2 = paddle.autograd.grad( + [sumce2], + [extended_coord], + # grad_outputs=lst, + create_graph=False, + retain_graph=True, + )[0] + assert extended_virial_corr2 is not None + extended_virial_corr = paddle.concat( + [ + extended_virial_corr0.unsqueeze(-1), + extended_virial_corr1.unsqueeze(-1), + extended_virial_corr2.unsqueeze(-1), + ], + axis=-1, + ) + return extended_virial_corr + + +def task_deriv_one( + atom_energy: paddle.Tensor, + energy: paddle.Tensor, + extended_coord: paddle.Tensor, + do_virial: bool = True, + do_atomic_virial: bool = False, + create_graph: bool = True, +): + # faked_grad = paddle.ones_like(energy) + # lst = paddle.jit.annotate(List[Optional[paddle.Tensor]], [faked_grad]) + extended_force = paddle.autograd.grad( + [energy], + [extended_coord], + # grad_outputs=lst, + create_graph=create_graph, + retain_graph=True, + )[0] + assert extended_force is not None + extended_force = -extended_force + if do_virial: + extended_virial = extended_force.unsqueeze(-1) @ extended_coord.unsqueeze(-2) + # the correction sums to zero, which does not contribute to global virial + if do_atomic_virial: + extended_virial_corr = atomic_virial_corr(extended_coord, atom_energy) + extended_virial = extended_virial + extended_virial_corr + # to [...,3,3] -> [...,9] + extended_virial = extended_virial.reshape( + 
+def get_atom_axis(
+    vdef: OutputVariableDef,
+):
paddle.Tensor]: + """Transform the output of the fitting network to + the model output. + + """ + redu_prec = env.GLOBAL_PD_ENER_FLOAT_PRECISION + model_ret = dict(fit_ret.items()) + for kk, vv in fit_ret.items(): + vdef = fit_output_def[kk] + shap = vdef.shape + atom_axis = -(len(shap) + 1) + if vdef.reducible: + kk_redu = get_reduce_name(kk) + model_ret[kk_redu] = paddle.sum(vv.astype(redu_prec), axis=atom_axis) + if vdef.r_differentiable: + kk_derv_r, kk_derv_c = get_deriv_name(kk) + dr, dc = take_deriv( + vv, + model_ret[kk_redu], + vdef, + coord_ext, + do_virial=vdef.c_differentiable, + do_atomic_virial=do_atomic_virial, + create_graph=create_graph, + ) + model_ret[kk_derv_r] = dr + if vdef.c_differentiable: + assert dc is not None + model_ret[kk_derv_c] = dc + model_ret[kk_derv_c + "_redu"] = paddle.sum( + model_ret[kk_derv_c].astype(redu_prec), axis=1 + ) + return model_ret + + +def communicate_extended_output( + model_ret: dict[str, paddle.Tensor], + model_output_def: ModelOutputDef, + mapping: paddle.Tensor, # nf x nloc + do_atomic_virial: bool = False, +) -> dict[str, paddle.Tensor]: + """Transform the output of the model network defined on + local and ghost (extended) atoms to local atoms. 
+ + """ + redu_prec = env.GLOBAL_PD_ENER_FLOAT_PRECISION + new_ret = {} + for kk in model_output_def.keys_outp(): + vv = model_ret[kk] + vdef = model_output_def[kk] + new_ret[kk] = vv + if vdef.reducible: + kk_redu = get_reduce_name(kk) + new_ret[kk_redu] = model_ret[kk_redu] + # nf x nloc + vldims = get_leading_dims(vv, vdef) + # nf x nall + mldims = list(mapping.shape) + kk_derv_r, kk_derv_c = get_deriv_name(kk) + if vdef.r_differentiable: + # vdim x 3 + derv_r_ext_dims = list(vdef.shape) + [3] # noqa:RUF005 + mapping = mapping.reshape(mldims + [1] * len(derv_r_ext_dims)).expand( + [-1] * len(mldims) + derv_r_ext_dims + ) + force = paddle.zeros(vldims + derv_r_ext_dims, dtype=vv.dtype).to( + device=vv.place + ) + # nf x nloc x nvar x 3 + new_ret[kk_derv_r] = decomp.scatter_reduce( + force, + 1, + index=mapping, + src=model_ret[kk_derv_r], + reduce="sum", + ) + if vdef.c_differentiable: + assert vdef.r_differentiable + derv_c_ext_dims = list(vdef.shape) + [9] # noqa:RUF005 + # nf x nloc x nvar x 3 -> nf x nloc x nvar x 9 + mapping = paddle.tile( + mapping, + [1] * (len(mldims) + len(vdef.shape)) + [3], + ) + virial = paddle.zeros(vldims + derv_c_ext_dims, dtype=vv.dtype).to( + device=vv.place + ) + # nf x nloc x nvar x 9 + new_ret[kk_derv_c] = decomp.scatter_reduce( + virial, + 1, + index=mapping, + src=model_ret[kk_derv_c], + reduce="sum", + ) + new_ret[kk_derv_c + "_redu"] = paddle.sum( + new_ret[kk_derv_c].to(redu_prec), axis=1 + ) + if not do_atomic_virial: + # pop atomic virial, because it is not correctly calculated. 
+ new_ret.pop(kk_derv_c) + return new_ret diff --git a/deepmd/pd/model/network/__init__.py b/deepmd/pd/model/network/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/model/network/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/model/network/init.py b/deepmd/pd/model/network/init.py new file mode 100644 index 0000000000..dbdad56794 --- /dev/null +++ b/deepmd/pd/model/network/init.py @@ -0,0 +1,458 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +# Copyright (c) 2024 The PyTorch Authors. All rights reserved. +# +# This file includes source code from PyTorch of version v2.3.0, which is released under the BSD-3-Clause license. +# For more information about PyTorch, visit https://pytorch.org/. + + +# These no_grad_* functions are necessary as wrappers around the parts of these +# functions that use `with paddle.no_grad()`. The JIT doesn't support context +# managers, so these need to be implemented as builtins. Using these wrappers +# lets us keep those builtins small and re-usable. + +from __future__ import ( + annotations, +) + +import math +import warnings + +import paddle +from paddle import ( + Tensor, +) + +PaddleGenerator = paddle.base.libpaddle.Generator + + +def _no_grad_uniform_(tensor: paddle.Tensor, a, b, generator=None): + with paddle.no_grad(): + return tensor.uniform_(a, b) + + +def _no_grad_normal_(tensor: paddle.Tensor, mean, std, generator=None): + with paddle.no_grad(): + return tensor.normal_(mean, std) + + +def _no_grad_trunc_normal_(tensor: paddle.Tensor, mean, std, a, b, generator=None): + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
" + "The distribution of values may be incorrect.", + stacklevel=2, + ) + + with paddle.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.multiply_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clip_(min=a, max=b) + return tensor + + +def _no_grad_zero_(tensor: paddle.Tensor): + with paddle.no_grad(): + return tensor.zero_() + + +def _no_grad_fill_(tensor: paddle.Tensor, val): + with paddle.no_grad(): + return tensor.fill_(val) + + +def calculate_gain(nonlinearity, param=None): + r"""Return the recommended gain value for the given nonlinearity function. + + The values are as follows: + + ================= ==================================================== + nonlinearity gain + ================= ==================================================== + Linear / Identity :math:`1` + Conv{1,2,3}D :math:`1` + Sigmoid :math:`1` + Tanh :math:`\frac{5}{3}` + ReLU :math:`\sqrt{2}` + Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}` + SELU :math:`\frac{3}{4}` + ================= ==================================================== + + .. warning:: + In order to implement `Self-Normalizing Neural Networks`_ , + you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``. + This gives the initial weights a variance of ``1 / N``, + which is necessary to induce a stable fixed point in the forward pass. 
+ In contrast, the default gain for ``SELU`` sacrifices the normalization + effect for more stable gradient flow in rectangular layers. + + Args: + nonlinearity: the non-linear function (`nn.functional` name) + param: optional parameter for the non-linear function + + Examples + -------- + >>> gain = nn.init.calculate_gain( + ... "leaky_relu", 0.2 + ... ) # leaky_relu with negative_slope=0.2 + + .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html + """ + linear_fns = [ + "linear", + "conv1d", + "conv2d", + "conv3d", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + ] + if nonlinearity in linear_fns or nonlinearity == "sigmoid": + return 1 + elif nonlinearity == "tanh": + return 5.0 / 3 + elif nonlinearity == "relu": + return math.sqrt(2.0) + elif nonlinearity == "leaky_relu": + if param is None: + negative_slope = 0.01 + elif ( + not isinstance(param, bool) + and isinstance(param, int) + or isinstance(param, float) + ): + # True/False are instances of int, hence check above + negative_slope = param + else: + raise ValueError(f"negative_slope {param} not a valid number") + return math.sqrt(2.0 / (1 + negative_slope**2)) + elif nonlinearity == "selu": + return ( + 3.0 / 4 + ) # Value found empirically (https://github.com/pytorch/pytorch/pull/50664) + else: + raise ValueError(f"Unsupported nonlinearity {nonlinearity}") + + +def _calculate_fan_in_and_fan_out(tensor, reverse=False): + dimensions = tensor.ndim + if dimensions < 2: + raise ValueError( + "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions" + ) + + if reverse: + num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1] + else: + num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0] + + receptive_field_size = 1 + if tensor.ndim > 2: + for s in tensor.shape[2:]: + receptive_field_size *= s + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps 
* receptive_field_size + + return fan_in, fan_out + + +def _calculate_correct_fan(tensor, mode, reverse=False): + mode = mode.lower() + valid_modes = ["fan_in", "fan_out"] + if mode not in valid_modes: + raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}") + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse) + return fan_in if mode == "fan_in" else fan_out + + +def zeros_(tensor: Tensor) -> Tensor: + r"""Fill the input Tensor with the scalar value `0`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.zeros_(w) + """ + return _no_grad_zero_(tensor) + + +def ones_(tensor: Tensor) -> Tensor: + r"""Fill the input Tensor with the scalar value `1`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.ones_(w) + """ + return _no_grad_fill_(tensor, 1.0) + + +def constant_(tensor: Tensor, val: float) -> Tensor: + r"""Fill the input Tensor with the value :math:`\text{val}`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + val: the value to fill the tensor with + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.constant_(w, 0.3) + """ + return _no_grad_fill_(tensor, val) + + +def normal_( + tensor: Tensor, + mean: float = 0.0, + std: float = 1.0, + generator: PaddleGenerator | None = None, +) -> Tensor: + r"""Fill the input Tensor with values drawn from the normal distribution. + + :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. 
+    return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)
math:: + \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.kaiming_uniform_(w, mode="fan_in", nonlinearity="relu") + """ + if 0 in tensor.shape: + warnings.warn("Initializing zero-element tensors is a no-op") + return tensor + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + with paddle.no_grad(): + return tensor.uniform_(-bound, bound) + + +def kaiming_normal_( + tensor: Tensor, + a: float = 0, + mode: str = "fan_in", + nonlinearity: str = "leaky_relu", + generator: PaddleGenerator | None = None, + reverse: bool = False, +): + r"""Fill the input `Tensor` with values using a Kaiming normal distribution. + + The method is described in `Delving deep into rectifiers: Surpassing + human-level performance on ImageNet classification` - He, K. et al. (2015). + The resulting tensor will have values sampled from + :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}} + + Also known as He initialization. 
+ + Args: + tensor: an n-dimensional `paddle.Tensor` + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.kaiming_normal_(w, mode="fan_out", nonlinearity="relu") + """ + if 0 in tensor.shape: + warnings.warn("Initializing zero-element tensors is a no-op") + return tensor + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + with paddle.no_grad(): + return tensor.normal_(0, std) + + +def xavier_uniform_( + tensor: Tensor, + gain: float = 1.0, + generator: PaddleGenerator | None = None, + reverse: bool = False, +) -> Tensor: + r"""Fill the input `Tensor` with values using a Xavier uniform distribution. + + The method is described in `Understanding the difficulty of training + deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010). + The resulting tensor will have values sampled from + :math:`\mathcal{U}(-a, a)` where + + .. math:: + a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + gain: an optional scaling factor + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. 
+ + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain("relu")) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + + return _no_grad_uniform_(tensor, -a, a, generator) + + +def xavier_normal_( + tensor: Tensor, + gain: float = 1.0, + generator: PaddleGenerator | None = None, + reverse: bool = False, +) -> Tensor: + r"""Fill the input `Tensor` with values using a Xavier normal distribution. + + The method is described in `Understanding the difficulty of training deep feedforward + neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor + will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + gain: an optional scaling factor + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by + default as [fout, fin, ...]. Defaults to False. 
+ + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.xavier_normal_(w) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + + return _no_grad_normal_(tensor, 0.0, std, generator) diff --git a/deepmd/pd/model/network/layernorm.py b/deepmd/pd/model/network/layernorm.py new file mode 100644 index 0000000000..0d052cfb90 --- /dev/null +++ b/deepmd/pd/model/network/layernorm.py @@ -0,0 +1,163 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle +import paddle.nn as nn + +from deepmd.dpmodel.utils.network import LayerNorm as DPLayerNorm +from deepmd.pd.model.network.init import ( + normal_, + ones_, + zeros_, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.utils import ( + get_generator, + to_numpy_array, + to_paddle_tensor, +) + +device = env.DEVICE + + +def empty_t(shape, precision): + return paddle.empty(shape, dtype=precision).to(device=device) + + +class LayerNorm(nn.Layer): + def __init__( + self, + num_in, + eps: float = 1e-5, + uni_init: bool = True, + bavg: float = 0.0, + stddev: float = 1.0, + precision: str = DEFAULT_PRECISION, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ): + super().__init__() + self.eps = eps + self.uni_init = uni_init + self.num_in = num_in + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.matrix = self.create_parameter( + shape=[num_in], + dtype=self.prec, + default_initializer=nn.initializer.Assign( + empty_t([num_in], self.prec), + ), + ) + self.bias = self.create_parameter( + shape=[num_in], + dtype=self.prec, + default_initializer=nn.initializer.Assign(empty_t([num_in], self.prec)), + ) + random_generator = get_generator(seed) + if self.uni_init: + ones_(self.matrix.data) + zeros_(self.bias.data) + else: + 
normal_(self.bias.data, mean=bavg, std=stddev, generator=random_generator) + normal_( + self.matrix.data, + std=stddev / np.sqrt(self.num_in), + generator=random_generator, + ) + self.trainable = trainable + if not self.trainable: + self.matrix.stop_gradient = True + self.bias.stop_gradient = True + + def dim_out(self) -> int: + return self.matrix.shape[0] + + def forward( + self, + xx: paddle.Tensor, + ) -> paddle.Tensor: + """One Layer Norm used by DP model. + + Parameters + ---------- + xx : paddle.Tensor + The input of index. + + Returns + ------- + yy: paddle.Tensor + The output. + """ + if xx.numel() > 0: + variance, mean = ( + paddle.var(xx, axis=-1, unbiased=False, keepdim=True), + paddle.mean(xx, axis=-1, keepdim=True), + ) + yy = (xx - mean) / paddle.sqrt(variance + self.eps) + else: + yy = xx + if self.matrix is not None and self.bias is not None: + yy = yy * self.matrix + self.bias + return yy + + def serialize(self) -> dict: + """Serialize the layer to a dict. + + Returns + ------- + dict + The serialized layer. + """ + nl = DPLayerNorm( + self.matrix.shape[0], + eps=self.eps, + trainable=self.trainable, + precision=self.precision, + ) + nl.w = to_numpy_array(self.matrix) + nl.b = to_numpy_array(self.bias) + data = nl.serialize() + return data + + @classmethod + def deserialize(cls, data: dict) -> "LayerNorm": + """Deserialize the layer from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + nl = DPLayerNorm.deserialize(data) + obj = cls( + nl["matrix"].shape[0], + eps=nl["eps"], + trainable=nl["trainable"], + precision=nl["precision"], + ) + prec = PRECISION_DICT[obj.precision] + + def check_load_param(ss): + if nl[ss] is not None: + tensor = to_paddle_tensor(nl[ss]) + return paddle.create_parameter( + tensor.shape, + dtype=tensor.dtype, + default_initializer=nn.initializer.Assign(tensor), + ) + return None + + obj.matrix = check_load_param("matrix") + obj.bias = check_load_param("bias") + return obj diff --git a/deepmd/pd/model/network/mlp.py b/deepmd/pd/model/network/mlp.py new file mode 100644 index 0000000000..370b0fa8fa --- /dev/null +++ b/deepmd/pd/model/network/mlp.py @@ -0,0 +1,328 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from __future__ import ( + annotations, +) + +from typing import ( + ClassVar, +) + +import numpy as np +import paddle +import paddle.nn as nn + +from deepmd.pd.utils import ( + env, +) + +device = env.DEVICE + +from deepmd.dpmodel.utils import ( + NativeLayer, +) +from deepmd.dpmodel.utils import NetworkCollection as DPNetworkCollection +from deepmd.dpmodel.utils import ( + make_embedding_network, + make_fitting_network, + make_multilayer_network, +) +from deepmd.pd.model.network.init import ( + PaddleGenerator, + kaiming_normal_, + normal_, + trunc_normal_, + xavier_uniform_, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.utils import ( + ActivationFn, + get_generator, + to_numpy_array, + to_paddle_tensor, +) + + +def empty_t(shape, precision): + return paddle.empty(shape, dtype=precision).to(device=device) + + +class Identity(nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + xx: paddle.Tensor, + ) -> paddle.Tensor: + """The Identity operation layer.""" + return xx + + def serialize(self) -> dict: + return { + "@class": "Identity", + "@version": 1, + } + + @classmethod + def deserialize(cls, data: dict) -> Identity: + 
+        elif init == "gating":
+            self._zero_init(self.bias is not None)
{init}") + + def check_type_consistency(self): + precision = self.precision + + def check_var(var): + if var is not None: + # assertion "float64" == "double" would fail + assert PRECISION_DICT[var.dtype.name] is PRECISION_DICT[precision] + + check_var(self.matrix) + check_var(self.bias) + check_var(self.idt) + + def dim_in(self) -> int: + return self.matrix.shape[0] + + def dim_out(self) -> int: + return self.matrix.shape[1] + + def _default_normal_init( + self, + bavg: float = 0.0, + stddev: float = 1.0, + generator: PaddleGenerator | None = None, + ): + normal_( + self.matrix.data, + std=stddev / np.sqrt(self.num_out + self.num_in), + generator=generator, + ) + if self.bias is not None: + normal_(self.bias.data, mean=bavg, std=stddev, generator=generator) + if self.idt is not None: + normal_(self.idt.data, mean=0.1, std=0.001, generator=generator) + + def _trunc_normal_init(self, scale=1.0, generator: PaddleGenerator | None = None): + # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) + TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978 + _, fan_in = self.matrix.shape + scale = scale / max(1, fan_in) + std = (scale**0.5) / TRUNCATED_NORMAL_STDDEV_FACTOR + trunc_normal_(self.matrix, mean=0.0, std=std, generator=generator) + + def _glorot_uniform_init(self, generator: PaddleGenerator | None = None): + xavier_uniform_(self.matrix, gain=1, generator=generator) + + def _zero_init(self, use_bias=True): + with paddle.no_grad(): + self.matrix.fill_(0.0) + if use_bias and self.bias is not None: + with paddle.no_grad(): + self.bias.fill_(1.0) + + def _normal_init(self, generator: PaddleGenerator | None = None): + kaiming_normal_(self.matrix, nonlinearity="linear", generator=generator) + + def forward( + self, + xx: paddle.Tensor, + ) -> paddle.Tensor: + """One MLP layer used by DP model. + + Parameters + ---------- + xx : paddle.Tensor + The input. + + Returns + ------- + yy: paddle.Tensor + The output. 
+ """ + ori_prec = xx.dtype + xx = xx.astype(self.prec) + yy = ( + paddle.matmul(xx, self.matrix) + self.bias + if self.bias is not None + else paddle.matmul(xx, self.matrix) + ) + yy = self.activate(yy).clone() + yy = yy * self.idt if self.idt is not None else yy + if self.resnet: + if xx.shape[-1] == yy.shape[-1]: + yy += xx + elif 2 * xx.shape[-1] == yy.shape[-1]: + yy += paddle.concat([xx, xx], axis=-1) + # else: + # yy = yy + yy = yy.astype(ori_prec) + return yy + + def serialize(self) -> dict: + """Serialize the layer to a dict. + + Returns + ------- + dict + The serialized layer. + """ + nl = NativeLayer( + self.matrix.shape[0], + self.matrix.shape[1], + bias=self.bias is not None, + use_timestep=self.idt is not None, + activation_function=self.activate_name, + resnet=self.resnet, + precision=self.precision, + ) + nl.w, nl.b, nl.idt = ( + to_numpy_array(self.matrix), + to_numpy_array(self.bias), + to_numpy_array(self.idt), + ) + return nl.serialize() + + @classmethod + def deserialize(cls, data: dict) -> MLPLayer: + """Deserialize the layer from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + nl = NativeLayer.deserialize(data) + obj = cls( + nl["matrix"].shape[0], + nl["matrix"].shape[1], + bias=nl["bias"] is not None, + use_timestep=nl["idt"] is not None, + activation_function=nl["activation_function"], + resnet=nl["resnet"], + precision=nl["precision"], + ) + prec = PRECISION_DICT[obj.precision] + + def check_load_param(ss): + if nl[ss] is not None: + tensor = to_paddle_tensor(nl[ss]) + return paddle.create_parameter( + tensor.shape, + dtype=tensor.dtype, + default_initializer=nn.initializer.Assign(tensor), + ) + return None + + obj.matrix = check_load_param("matrix") + obj.bias = check_load_param("bias") + obj.idt = check_load_param("idt") + return obj + + +MLP_ = make_multilayer_network(MLPLayer, nn.Layer) + + +class MLP(MLP_): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.layers = paddle.nn.LayerList(self.layers) + + forward = MLP_.call + + +EmbeddingNet = make_embedding_network(MLP, MLPLayer) + +FittingNet = make_fitting_network(EmbeddingNet, MLP, MLPLayer) + + +class NetworkCollection(DPNetworkCollection, nn.Layer): + """Paddle implementation of NetworkCollection.""" + + NETWORK_TYPE_MAP: ClassVar[dict[str, type]] = { + "network": MLP, + "embedding_network": EmbeddingNet, + "fitting_network": FittingNet, + } + + def __init__(self, *args, **kwargs): + # init both two base classes + DPNetworkCollection.__init__(self, *args, **kwargs) + nn.Layer.__init__(self) + self.networks = self._networks = paddle.nn.LayerList(self._networks) diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py new file mode 100644 index 0000000000..21d6586476 --- /dev/null +++ b/deepmd/pd/model/network/network.py @@ -0,0 +1,555 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from deepmd.pd.model.network import ( + init, +) +from deepmd.pd.model.network.mlp 
import ( + EmbeddingNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +try: + from typing import ( + Final, + ) +except ImportError: + from paddle.jit import Final + +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, +) +from deepmd.pd.utils.utils import ( + ActivationFn, + to_paddle_tensor, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) + + +def Tensor(*shape): + return paddle.empty(shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + + +class SimpleLinear(nn.Layer): + use_timestep: Final[bool] + + def __init__( + self, + num_in, + num_out, + bavg=0.0, + stddev=1.0, + use_timestep=False, + activate=None, + bias: bool = True, + ): + """Construct a linear layer. + + Args: + - num_in: Width of input tensor. + - num_out: Width of output tensor. + - use_timestep: Apply time-step to weight. + - activate: type of activate func. + """ + super().__init__() + self.num_in = num_in + self.num_out = num_out + self.use_timestep = use_timestep + self.activate = ActivationFn(activate) + + self.matrix = self.create_parameter( + [num_in, num_out], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ) + init.normal_(self.matrix, std=stddev / np.sqrt(num_out + num_in)) + if bias: + self.bias = self.create_parameter( + (1, num_out), + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ) + init.normal_(self.bias, mean=bavg, std=stddev) + else: + self.bias = None + if self.use_timestep: + self.idt = self.create_parameter( + (1, num_out), + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ) + init.normal_(self.idt, mean=0.1, std=0.001) + + def forward(self, inputs): + """Return X*W+b.""" + xw = paddle.matmul(inputs, self.matrix) + hidden = xw + self.bias if self.bias is not None else xw + hidden = self.activate(hidden) + if self.use_timestep: + hidden = hidden * self.idt + return hidden + + +class Linear(nn.Linear): + def __init__( + self, + d_in: int, + d_out: int, + bias: bool = True, + 
init: str = "default", + ): + super().__init__( + d_in, + d_out, + bias=bias, + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + device=env.DEVICE, + ) + + self.use_bias = bias + + if self.use_bias: + with paddle.no_grad(): + self.bias.fill_(0) + + if init == "default": + self._trunc_normal_init(1.0) + elif init == "relu": + self._trunc_normal_init(2.0) + elif init == "glorot": + self._glorot_uniform_init() + elif init == "gating": + self._zero_init(self.use_bias) + elif init == "normal": + self._normal_init() + elif init == "final": + self._zero_init(False) + else: + raise ValueError("Invalid init method.") + + def _trunc_normal_init(self, scale=1.0): + # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) + TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978 + _, fan_in = self.weight.shape + scale = scale / max(1, fan_in) + std = (scale**0.5) / TRUNCATED_NORMAL_STDDEV_FACTOR + init.trunc_normal_(self.weight, mean=0.0, std=std) + + def _glorot_uniform_init(self): + init.xavier_uniform_(self.weight, gain=1) + + def _zero_init(self, use_bias=True): + with paddle.no_grad(): + self.weight.fill_(0.0) + if use_bias: + with paddle.no_grad(): + self.bias.fill_(1.0) + + def _normal_init(self): + init.kaiming_normal_(self.weight, nonlinearity="linear") + + +class NonLinearHead(nn.Layer): + def __init__(self, input_dim, out_dim, activation_fn, hidden=None): + super().__init__() + hidden = input_dim if not hidden else hidden + self.linear1 = SimpleLinear(input_dim, hidden, activate=activation_fn) + self.linear2 = SimpleLinear(hidden, out_dim) + + def forward(self, x): + x = self.linear1(x) + x = self.linear2(x) + return x + + +class MaskLMHead(nn.Layer): + """Head for masked language modeling.""" + + def __init__(self, embed_dim, output_dim, activation_fn, weight=None): + super().__init__() + self.dense = SimpleLinear(embed_dim, embed_dim) + self.activation_fn = ActivationFn(activation_fn) + self.layer_norm = nn.LayerNorm(embed_dim) + + if weight is None: + weight = 
nn.Linear(embed_dim, output_dim, bias_attr=False).weight + self.weight = weight.T + self.bias = self.create_parameter( + [output_dim], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + default_initializer=nn.initializer.Constant(0), # pylint: disable=no-explicit-dtype,no-explicit-device + ) + + def forward( + self, features, masked_tokens: Optional[paddle.Tensor] = None, **kwargs + ): + # Only project the masked tokens while training, + # saves both memory and computation + if masked_tokens is not None: + features = features[masked_tokens, :] + + x = self.dense(features) + x = self.activation_fn(x) + x = self.layer_norm(x) + # project back to size of vocabulary with bias + x = F.linear(x, self.weight) + self.bias + return x + + +class ResidualDeep(nn.Layer): + def __init__( + self, type_id, embedding_width, neuron, bias_atom_e, out_dim=1, resnet_dt=False + ): + """Construct a filter on the given element as neighbor. + + Args: + - typei: Element ID. + - embedding_width: Embedding width per atom. + - neuron: Number of neurons in each hidden layers of the embedding net. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.type_id = type_id + self.neuron = [embedding_width, *neuron] + self.out_dim = out_dim + + deep_layers = [] + for ii in range(1, len(self.neuron)): + one = SimpleLinear( + num_in=self.neuron[ii - 1], + num_out=self.neuron[ii], + use_timestep=( + resnet_dt and ii > 1 and self.neuron[ii - 1] == self.neuron[ii] + ), + activate="tanh", + ) + deep_layers.append(one) + self.deep_layers = nn.LayerList(deep_layers) + if not env.ENERGY_BIAS_TRAINABLE: + bias_atom_e = 0 + self.final_layer = SimpleLinear(self.neuron[-1], self.out_dim, bias_atom_e) + + def forward(self, inputs): + """Calculate decoded embedding for each atom. + + Args: + - inputs: Embedding net output per atom. Its shape is [nframes*nloc, self.embedding_width]. + + Returns + ------- + - `paddle.Tensor`: Output layer with shape [nframes*nloc, self.neuron[-1]]. 
+ """ + outputs = inputs + for idx, linear in enumerate(self.deep_layers): + if idx > 0 and linear.num_in == linear.num_out: + outputs = outputs + linear(outputs) + else: + outputs = linear(outputs) + outputs = self.final_layer(outputs) + return outputs + + +class TypeEmbedNet(nn.Layer): + def __init__( + self, + type_nums, + embed_dim, + bavg=0.0, + stddev=1.0, + precision="default", + seed: Optional[Union[int, list[int]]] = None, + use_econf_tebd=False, + use_tebd_bias: bool = False, + type_map=None, + ): + """Construct a type embedding net.""" + super().__init__() + self.type_nums = type_nums + self.embed_dim = embed_dim + self.bavg = bavg + self.stddev = stddev + self.use_econf_tebd = use_econf_tebd + self.use_tebd_bias = use_tebd_bias + self.type_map = type_map + self.embedding = TypeEmbedNetConsistent( + ntypes=self.type_nums, + neuron=[self.embed_dim], + padding=True, + activation_function="Linear", + use_econf_tebd=use_econf_tebd, + use_tebd_bias=use_tebd_bias, + type_map=type_map, + precision=precision, + seed=seed, + ) + # init.normal_(self.embedding.weight[:-1], mean=bavg, std=stddev) + + def forward(self, atype): + """ + Args: + atype: Type of each input, [nframes, nloc] or [nframes, nloc, nnei]. + + Returns + ------- + type_embedding: + + """ + return self.embedding(atype.place)[atype] + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only TypeEmbedNet of the same type can share params!" + if shared_level == 0: + # the following will successfully link all the params except buffers, which need manually link. 
+ for item in self._sub_layers: + self._sub_layers[item] = base_class._sub_layers[item] + else: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + self.embedding.change_type_map(type_map=type_map) + + +class TypeEmbedNetConsistent(nn.Layer): + r"""Type embedding network that is consistent with other backends. + + Parameters + ---------- + ntypes : int + Number of atom types + neuron : list[int] + Number of neurons in each hidden layers of the embedding net + resnet_dt + Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b) + activation_function + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + precision + The precision of the embedding net parameters. Supported options are |PRECISION| + trainable + If the weights of embedding net are trainable. + seed + Random seed for initializing the network parameters. + padding + Concat the zero padding to the output, as the default embedding of empty type. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + use_tebd_bias : bool, Optional + Whether to use bias in the type embedding layer. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. 
+ """ + + def __init__( + self, + *, + ntypes: int, + neuron: list[int], + resnet_dt: bool = False, + activation_function: str = "tanh", + precision: str = "default", + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + padding: bool = False, + use_econf_tebd: bool = False, + use_tebd_bias: bool = False, + type_map: Optional[list[str]] = None, + ): + """Construct a type embedding net.""" + super().__init__() + self.ntypes = ntypes + self.neuron = neuron + self.seed = seed + self.resnet_dt = resnet_dt + self.precision = precision + self.prec = env.PRECISION_DICT[self.precision] + self.activation_function = str(activation_function) + self.trainable = trainable + self.padding = padding + self.use_econf_tebd = use_econf_tebd + self.use_tebd_bias = use_tebd_bias + self.type_map = type_map + self.econf_tebd = None + embed_input_dim = ntypes + if self.use_econf_tebd: + econf_tebd, embed_input_dim = get_econf_tebd( + self.type_map, precision=self.precision + ) + self.econf_tebd = to_paddle_tensor(econf_tebd) + self.embedding_net = EmbeddingNet( + embed_input_dim, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + self.seed, + bias=self.use_tebd_bias, + ) + for param in self.parameters(): + param.stop_gradient = not trainable + + def forward(self, device: str): + """Caulate type embedding network. + + Returns + ------- + type_embedding: paddle.Tensor + Type embedding network. 
+ """ + if not self.use_econf_tebd: + embed = self.embedding_net( + paddle.eye(self.ntypes, dtype=self.prec).to(device=device) + ) + else: + assert self.econf_tebd is not None + embed = self.embedding_net(self.econf_tebd.to(device)) + if self.padding: + embed = paddle.concat( + [ + embed, + paddle.zeros([1, embed.shape[1]], dtype=self.prec).to( + device=device + ), + ] + ) + return embed + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + if not self.use_econf_tebd: + do_resnet = self.neuron[0] in [ + self.ntypes, + self.ntypes * 2, + len(type_map), + len(type_map) * 2, + ] + assert ( + not do_resnet or self.activation_function == "Linear" + ), "'activation_function' must be 'Linear' when performing type changing on resnet structure!" 
+ first_layer_matrix = self.embedding_net.layers[0].matrix + eye_vector = paddle.eye(self.ntypes, dtype=self.prec).to( + device=first_layer_matrix.place + ) + # preprocess for resnet connection + if self.neuron[0] == self.ntypes: + first_layer_matrix += eye_vector + elif self.neuron[0] == self.ntypes * 2: + first_layer_matrix += paddle.concat([eye_vector, eye_vector], axis=-1) + + # randomly initialize params for the unseen types + if has_new_type: + extend_type_params = paddle.rand( + [len(type_map), first_layer_matrix.shape[-1]], + dtype=first_layer_matrix.dtype, + ).to(device=first_layer_matrix.place) + first_layer_matrix = paddle.concat( + [first_layer_matrix, extend_type_params], axis=0 + ) + + first_layer_matrix = first_layer_matrix[remap_index] + new_ntypes = len(type_map) + eye_vector = paddle.eye(new_ntypes, dtype=self.prec).to( + device=first_layer_matrix.place + ) + + if self.neuron[0] == new_ntypes: + first_layer_matrix -= eye_vector + elif self.neuron[0] == new_ntypes * 2: + first_layer_matrix -= paddle.concat([eye_vector, eye_vector], axis=-1) + + self.embedding_net.layers[0].num_in = new_ntypes + self.embedding_net.layers[0].matrix = self.create_parameter( + first_layer_matrix.shape, + dtype=first_layer_matrix.dtype, + default_initializer=nn.initializer.Assign(first_layer_matrix), + ) + else: + econf_tebd, embed_input_dim = get_econf_tebd( + type_map, precision=self.precision + ) + self.econf_tebd = to_paddle_tensor(econf_tebd) + self.type_map = type_map + self.ntypes = len(type_map) + + @classmethod + def deserialize(cls, data: dict): + """Deserialize the model. 
+ + Parameters + ---------- + data : dict + The serialized data + + Returns + ------- + TypeEmbedNetConsistent + The deserialized model + """ + data = data.copy() + check_version_compatibility(data.pop("@version", 1), 2, 1) + data_cls = data.pop("@class") + assert data_cls == "TypeEmbedNet", f"Invalid class {data_cls}" + + embedding_net = EmbeddingNet.deserialize(data.pop("embedding")) + # compat with version 1 + if "use_tebd_bias" not in data: + data["use_tebd_bias"] = True + type_embedding_net = cls(**data) + type_embedding_net.embedding_net = embedding_net + return type_embedding_net + + def serialize(self) -> dict: + """Serialize the model. + + Returns + ------- + dict + The serialized data + """ + return { + "@class": "TypeEmbedNet", + "@version": 2, + "ntypes": self.ntypes, + "neuron": self.neuron, + "resnet_dt": self.resnet_dt, + "precision": self.precision, + "activation_function": self.activation_function, + "trainable": self.trainable, + "padding": self.padding, + "use_econf_tebd": self.use_econf_tebd, + "use_tebd_bias": self.use_tebd_bias, + "type_map": self.type_map, + "embedding": self.embedding_net.serialize(), + } diff --git a/deepmd/pd/model/task/__init__.py b/deepmd/pd/model/task/__init__.py new file mode 100644 index 0000000000..1a36bff30c --- /dev/null +++ b/deepmd/pd/model/task/__init__.py @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .base_fitting import ( + BaseFitting, +) +from .ener import ( + EnergyFittingNet, + EnergyFittingNetDirect, +) +from .fitting import ( + Fitting, +) +from .type_predict import ( + TypePredictNet, +) + +__all__ = [ + "EnergyFittingNet", + "EnergyFittingNetDirect", + "Fitting", + "BaseFitting", + "TypePredictNet", +] diff --git a/deepmd/pd/model/task/base_fitting.py b/deepmd/pd/model/task/base_fitting.py new file mode 100644 index 0000000000..9ad3b801cd --- /dev/null +++ b/deepmd/pd/model/task/base_fitting.py @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import paddle + +from 
from deepmd.dpmodel.fitting import (
    make_base_fitting,
)

BaseFitting = make_base_fitting(paddle.Tensor, fwd_method_name="forward")

# --- new file: deepmd/pd/model/task/ener.py ---
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
import logging
from typing import (
    Optional,
    Union,
)

import numpy as np
import paddle

from deepmd.dpmodel import (
    FittingOutputDef,
    OutputVariableDef,
    fitting_check_output,
)
from deepmd.pd.model.network.network import (
    ResidualDeep,
)
from deepmd.pd.model.task.fitting import (
    Fitting,
    GeneralFitting,
)
from deepmd.pd.model.task.invar_fitting import (
    InvarFitting,
)
from deepmd.pd.utils import (
    env,
)
from deepmd.pd.utils.env import (
    DEFAULT_PRECISION,
)
from deepmd.utils.version import (
    check_version_compatibility,
)

dtype = env.GLOBAL_PD_FLOAT_PRECISION
device = env.DEVICE

log = logging.getLogger(__name__)


@Fitting.register("ener")
class EnergyFittingNet(InvarFitting):
    """Energy fitting net: a 1-dimensional InvarFitting named "energy"."""

    def __init__(
        self,
        ntypes: int,
        dim_descrpt: int,
        neuron: list[int] = [128, 128, 128],
        bias_atom_e: Optional[paddle.Tensor] = None,
        resnet_dt: bool = True,
        numb_fparam: int = 0,
        numb_aparam: int = 0,
        activation_function: str = "tanh",
        precision: str = DEFAULT_PRECISION,
        mixed_types: bool = True,
        seed: Optional[Union[int, list[int]]] = None,
        type_map: Optional[list[str]] = None,
        **kwargs,
    ):
        super().__init__(
            "energy",
            ntypes,
            dim_descrpt,
            1,
            neuron=neuron,
            bias_atom_e=bias_atom_e,
            resnet_dt=resnet_dt,
            numb_fparam=numb_fparam,
            numb_aparam=numb_aparam,
            activation_function=activation_function,
            precision=precision,
            mixed_types=mixed_types,
            seed=seed,
            type_map=type_map,
            **kwargs,
        )

    @classmethod
    def deserialize(cls, data: dict) -> "GeneralFitting":
        data = copy.deepcopy(data)
        check_version_compatibility(data.pop("@version", 1), 2, 1)
        # var_name/dim_out are fixed ("energy", 1) by __init__; drop them.
        data.pop("var_name")
        data.pop("dim_out")
        return super().deserialize(data)

    def serialize(self) -> dict:
        """Serialize the fitting to dict."""
        return {
            **super().serialize(),
            "type": "ener",
        }

    # make jit happy with paddle 2.0.0
    exclude_types: list[int]


@Fitting.register("direct_force")
@Fitting.register("direct_force_ener")
@fitting_check_output
class EnergyFittingNetDirect(Fitting):
    def __init__(
        self,
        ntypes,
        dim_descrpt,
        neuron,
        bias_atom_e=None,
        out_dim=1,
        resnet_dt=True,
        use_tebd=True,
        return_energy=False,
        **kwargs,
    ):
        """Construct a fitting net for energy.

        Args:
        - ntypes: Element count.
        - dim_descrpt: Embedding width per atom.
        - neuron: Number of neurons in each hidden layers of the fitting net.
        - bias_atom_e: Average energy per atom for each element.
        - resnet_dt: Using time-step in the ResNet construction.
        """
        super().__init__()
        self.ntypes = ntypes
        self.dim_descrpt = dim_descrpt
        self.use_tebd = use_tebd
        self.out_dim = out_dim
        if bias_atom_e is None:
            bias_atom_e = np.zeros([self.ntypes])  # pylint: disable=no-explicit-dtype
        if not use_tebd:
            assert self.ntypes == len(bias_atom_e), "Element count mismatches!"
        bias_atom_e = paddle.to_tensor(bias_atom_e).to(device=env.DEVICE)  # pylint: disable=no-explicit-dtype
        self.register_buffer("bias_atom_e", bias_atom_e)

        # One per-type dipole branch; only branch 0 is used when use_tebd.
        filter_layers_dipole = []
        for type_i in range(self.ntypes):
            one = ResidualDeep(
                type_i,
                dim_descrpt,
                neuron,
                0.0,
                out_dim=out_dim,
                resnet_dt=resnet_dt,
            )
            filter_layers_dipole.append(one)
        self.filter_layers_dipole = paddle.nn.LayerList(filter_layers_dipole)

        self.return_energy = return_energy
        filter_layers = []
        if self.return_energy:
            for type_i in range(self.ntypes):
                bias_type = 0.0 if self.use_tebd else bias_atom_e[type_i]
                one = ResidualDeep(
                    type_i, dim_descrpt, neuron, bias_type, resnet_dt=resnet_dt
                )
                filter_layers.append(one)
        self.filter_layers = paddle.nn.LayerList(filter_layers)

    def output_def(self):
        return FittingOutputDef(
            [
                OutputVariableDef(
                    "energy",
                    [1],
                    reducible=True,
                    r_differentiable=False,
                    c_differentiable=False,
                ),
                OutputVariableDef(
                    "dforce",
                    [3],
                    reducible=False,
                    r_differentiable=False,
                    c_differentiable=False,
                ),
            ]
        )

    def serialize(self) -> dict:
        raise NotImplementedError

    def deserialize(self) -> "EnergyFittingNetDirect":
        raise NotImplementedError

    def change_type_map(
        self, type_map: list[str], model_with_new_type_stat=None
    ) -> None:
        raise NotImplementedError

    def get_type_map(self) -> list[str]:
        raise NotImplementedError

    def forward(
        self,
        inputs: paddle.Tensor,
        atype: paddle.Tensor,
        gr: Optional[paddle.Tensor] = None,
        g2: Optional[paddle.Tensor] = None,
        h2: Optional[paddle.Tensor] = None,
        fparam: Optional[paddle.Tensor] = None,
        aparam: Optional[paddle.Tensor] = None,
    ) -> tuple[paddle.Tensor, None]:
        """Based on embedding net output, calculate total energy.

        Args:
        - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.dim_descrpt].
        - atype: Atom type per atom.

        Returns
        -------
        - `paddle.Tensor`: Total energy with shape [nframes, natoms[0]].
        """
        nframes, nloc, _ = inputs.shape
        if self.use_tebd:
            # if atype_tebd is not None:
            #     inputs = paddle.concat([inputs, atype_tebd], axis=-1)
            vec_out = self.filter_layers_dipole[0](
                inputs
            )  # Shape is [nframes, nloc, m1]
            assert list(vec_out.shape) == [nframes, nloc, self.out_dim]
            # (nf x nloc) x 1 x od
            vec_out = vec_out.reshape([-1, 1, self.out_dim])
            assert gr is not None
            # (nf x nloc) x od x 3
            gr = gr.reshape([-1, self.out_dim, 3])
            vec_out = (
                paddle.bmm(vec_out, gr).squeeze(-2).reshape([nframes, nloc, 3])
            )  # Shape is [nframes, nloc, 3]
        else:
            vec_out = paddle.zeros_like(atype).unsqueeze(-1)  # jit assertion
            for type_i, filter_layer in enumerate(self.filter_layers_dipole):
                mask = atype == type_i
                vec_out_type = filter_layer(inputs)  # Shape is [nframes, nloc, m1]
                vec_out_type = vec_out_type * mask.unsqueeze(-1)
                vec_out = vec_out + vec_out_type  # Shape is [nframes, natoms[0], 1]

        outs = paddle.zeros_like(atype).unsqueeze(-1)  # jit assertion
        if self.return_energy:
            if self.use_tebd:
                atom_energy = self.filter_layers[0](inputs) + self.bias_atom_e[
                    atype
                ].unsqueeze(-1)
                outs = (
                    outs.astype(atom_energy.dtype) + atom_energy
                )  # Shape is [nframes, natoms[0], 1]
            else:
                for type_i, filter_layer in enumerate(self.filter_layers):
                    mask = atype == type_i
                    atom_energy = filter_layer(inputs)
                    if not env.ENERGY_BIAS_TRAINABLE:
                        atom_energy = atom_energy + self.bias_atom_e[type_i]
                    atom_energy = atom_energy * mask.unsqueeze(-1)
                    outs = outs + atom_energy  # Shape is [nframes, natoms[0], 1]
        return {
            "energy": outs.to(env.GLOBAL_PD_FLOAT_PRECISION),
            "dforce": vec_out,
        }


# --- new file: deepmd/pd/model/task/fitting.py ---
# SPDX-License-Identifier: LGPL-3.0-or-later
# NOTE(review): this file's import block begins in the following chunk.
copy +import logging +from abc import ( + abstractmethod, +) +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.network.mlp import ( + FittingNet, + NetworkCollection, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.exclude_mask import ( + AtomExcludeMask, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION +device = env.DEVICE + +log = logging.getLogger(__name__) + + +class Fitting(paddle.nn.Layer, BaseFitting): + # plugin moved to BaseFitting + + def __new__(cls, *args, **kwargs): + if cls is Fitting: + return BaseFitting.__new__(BaseFitting, *args, **kwargs) + return super().__new__(cls) + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only fitting nets of the same type can share params!" + if shared_level == 0: + # link buffers + if hasattr(self, "bias_atom_e"): + self.bias_atom_e = base_class.bias_atom_e + # the following will successfully link all the params except buffers, which need manually link. + for item in self._sub_layers: + self._sub_layers[item] = base_class._sub_layers[item] + elif shared_level == 1: + # only not share the bias_atom_e + # the following will successfully link all the params except buffers, which need manually link. 
+ for item in self._sub_layers: + self._sub_layers[item] = base_class._sub_layers[item] + else: + raise NotImplementedError + + +class GeneralFitting(Fitting): + """Construct a general fitting net. + + Parameters + ---------- + var_name : str + The atomic property to fit, 'energy', 'dipole', and 'polar'. + ntypes : int + Element count. + dim_descrpt : int + Embedding width per atom. + dim_out : int + The output dimension of the fitting net. + neuron : list[int] + Number of neurons in each hidden layers of the fitting net. + bias_atom_e : paddle.Tensor, optional + Average enery per atom for each element. + resnet_dt : bool + Using time-step in the ResNet construction. + numb_fparam : int + Number of frame parameters. + numb_aparam : int + Number of atomic parameters. + activation_function : str + Activation function. + precision : str + Numerical precision. + mixed_types : bool + If true, use a uniform fitting net for all atom types, otherwise use + different fitting nets for different atom types. + rcond : float, optional + The condition number for the regression of atomic energy. + seed : int, optional + Random seed. + exclude_types: list[int] + Atomic contributions of the excluded atom types are set zero. + trainable : Union[list[bool], bool] + If the parameters in the fitting net are trainable. + Now this only supports setting all the parameters in the fitting net at one state. + When in list[bool], the trainable will be True only if all the boolean parameters are True. + remove_vaccum_contribution: list[bool], optional + Remove vaccum contribution before the bias is added. The list assigned each + type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same + length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. 
+ """ + + def __init__( + self, + var_name: str, + ntypes: int, + dim_descrpt: int, + neuron: list[int] = [128, 128, 128], + bias_atom_e: Optional[paddle.Tensor] = None, + resnet_dt: bool = True, + numb_fparam: int = 0, + numb_aparam: int = 0, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + mixed_types: bool = True, + rcond: Optional[float] = None, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + trainable: Union[bool, list[bool]] = True, + remove_vaccum_contribution: Optional[list[bool]] = None, + type_map: Optional[list[str]] = None, + **kwargs, + ): + super().__init__() + self.var_name = var_name + self.ntypes = ntypes + self.dim_descrpt = dim_descrpt + self.neuron = neuron + self.mixed_types = mixed_types + self.resnet_dt = resnet_dt + self.numb_fparam = numb_fparam + self.numb_aparam = numb_aparam + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.rcond = rcond + self.seed = seed + self.type_map = type_map + # order matters, should be place after the assignment of ntypes + self.reinit_exclude(exclude_types) + self.trainable = trainable + # need support for each layer settings + self.trainable = ( + all(self.trainable) if isinstance(self.trainable, list) else self.trainable + ) + self.remove_vaccum_contribution = remove_vaccum_contribution + + net_dim_out = self._net_out_dim() + # init constants + if bias_atom_e is None: + bias_atom_e = np.zeros([self.ntypes, net_dim_out], dtype=np.float64) + bias_atom_e = paddle.to_tensor(bias_atom_e, dtype=self.prec).to(device=device) + bias_atom_e = bias_atom_e.reshape([self.ntypes, net_dim_out]) + if not self.mixed_types: + assert self.ntypes == bias_atom_e.shape[0], "Element count mismatches!" 
+ self.register_buffer("bias_atom_e", bias_atom_e) + + if self.numb_fparam > 0: + self.register_buffer( + "fparam_avg", + paddle.zeros([self.numb_fparam], dtype=self.prec).to(device=device), + ) + self.register_buffer( + "fparam_inv_std", + paddle.ones([self.numb_fparam], dtype=self.prec).to(device=device), + ) + else: + self.fparam_avg, self.fparam_inv_std = None, None + if self.numb_aparam > 0: + self.register_buffer( + "aparam_avg", + paddle.zeros([self.numb_aparam], dtype=self.prec).to(device=device), + ) + self.register_buffer( + "aparam_inv_std", + paddle.ones([self.numb_aparam], dtype=self.prec).to(device=device), + ) + else: + self.aparam_avg, self.aparam_inv_std = None, None + + in_dim = self.dim_descrpt + self.numb_fparam + self.numb_aparam + + self.filter_layers = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + net_dim_out, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + # set trainable + for param in self.parameters(): + param.stop_gradient = not self.trainable + + def reinit_exclude( + self, + exclude_types: list[int] = [], + ): + self.exclude_types = exclude_types + self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + assert self.mixed_types, "Only models in mixed types can perform type changing!" 
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.ntypes = len(type_map) + self.reinit_exclude(map_atom_exclude_types(self.exclude_types, remap_index)) + if has_new_type: + extend_shape = [len(type_map), *list(self.bias_atom_e.shape[1:])] + extend_bias_atom_e = paddle.zeros( + extend_shape, + dtype=self.bias_atom_e.dtype, + ).to(device=self.bias_atom_e.place) + self.bias_atom_e = paddle.concat( + [self.bias_atom_e, extend_bias_atom_e], axis=0 + ) + self.bias_atom_e = self.bias_atom_e[remap_index] + + def serialize(self) -> dict: + """Serialize the fitting to dict.""" + return { + "@class": "Fitting", + "@version": 2, + "var_name": self.var_name, + "ntypes": self.ntypes, + "dim_descrpt": self.dim_descrpt, + "neuron": self.neuron, + "resnet_dt": self.resnet_dt, + "numb_fparam": self.numb_fparam, + "numb_aparam": self.numb_aparam, + "activation_function": self.activation_function, + "precision": self.precision, + "mixed_types": self.mixed_types, + "nets": self.filter_layers.serialize(), + "rcond": self.rcond, + "exclude_types": self.exclude_types, + "@variables": { + "bias_atom_e": to_numpy_array(self.bias_atom_e), + "fparam_avg": to_numpy_array(self.fparam_avg), + "fparam_inv_std": to_numpy_array(self.fparam_inv_std), + "aparam_avg": to_numpy_array(self.aparam_avg), + "aparam_inv_std": to_numpy_array(self.aparam_inv_std), + }, + "type_map": self.type_map, + # "tot_ener_zero": self.tot_ener_zero , + # "trainable": self.trainable , + # "atom_ener": self.atom_ener , + # "layer_name": self.layer_name , + # "use_aparam_as_mask": self.use_aparam_as_mask , + # "spin": self.spin , + ## NOTICE: not supported by far + "tot_ener_zero": False, + "trainable": [self.trainable] * (len(self.neuron) + 1), + "layer_name": None, + "use_aparam_as_mask": False, + "spin": None, + } + + @classmethod + def deserialize(cls, data: dict) -> "GeneralFitting": + data = copy.deepcopy(data) + variables = data.pop("@variables") + 
nets = data.pop("nets") + obj = cls(**data) + for kk in variables.keys(): + obj[kk] = to_paddle_tensor(variables[kk]) + obj.filter_layers = NetworkCollection.deserialize(nets) + return obj + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.numb_fparam + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.numb_aparam + + # make jit happy + exclude_types: list[int] + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + # make jit happy + sel_type: list[int] = [] + for ii in range(self.ntypes): + if ii not in self.exclude_types: + sel_type.append(ii) + return sel_type + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def __setitem__(self, key, value): + if key in ["bias_atom_e"]: + value = value.reshape([self.ntypes, self._net_out_dim()]) + self.bias_atom_e = value + elif key in ["fparam_avg"]: + self.fparam_avg = value + elif key in ["fparam_inv_std"]: + self.fparam_inv_std = value + elif key in ["aparam_avg"]: + self.aparam_avg = value + elif key in ["aparam_inv_std"]: + self.aparam_inv_std = value + elif key in ["scale"]: + self.scale = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["bias_atom_e"]: + return self.bias_atom_e + elif key in ["fparam_avg"]: + return self.fparam_avg + elif key in ["fparam_inv_std"]: + return self.fparam_inv_std + elif key in ["aparam_avg"]: + return self.aparam_avg + elif key in ["aparam_inv_std"]: + return self.aparam_inv_std + elif key in ["scale"]: + return self.scale + else: + raise KeyError(key) + + @abstractmethod + def _net_out_dim(self): + """Set the FittingNet output dim.""" + pass + + def 
_extend_f_avg_std(self, xx: paddle.Tensor, nb: int) -> paddle.Tensor: + return paddle.tile(xx.reshape([1, self.numb_fparam]), [nb, 1]) + + def _extend_a_avg_std(self, xx: paddle.Tensor, nb: int, nloc: int) -> paddle.Tensor: + return paddle.tile(xx.reshape([1, 1, self.numb_aparam]), [nb, nloc, 1]) + + def _forward_common( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + xx = descriptor + if self.remove_vaccum_contribution is not None: + # TODO: compute the input for vaccm when remove_vaccum_contribution is set + # Idealy, the input for vaccum should be computed; + # we consider it as always zero for convenience. + # Needs a compute_input_stats for vaccum passed from the + # descriptor. + xx_zeros = paddle.zeros_like(xx) + else: + xx_zeros = None + nf, nloc, nd = xx.shape + net_dim_out = self._net_out_dim() + + if nd != self.dim_descrpt: + raise ValueError( + "get an input descriptor of dim {nd}," + "which is not consistent with {self.dim_descrpt}." 
+ ) + # check fparam dim, concate to input descriptor + if self.numb_fparam > 0: + assert fparam is not None, "fparam should not be None" + assert self.fparam_avg is not None + assert self.fparam_inv_std is not None + if fparam.shape[-1] != self.numb_fparam: + raise ValueError( + "get an input fparam of dim {fparam.shape[-1]}, ", + "which is not consistent with {self.numb_fparam}.", + ) + fparam = fparam.reshape([nf, self.numb_fparam]) + nb, _ = fparam.shape + t_fparam_avg = self._extend_f_avg_std(self.fparam_avg, nb) + t_fparam_inv_std = self._extend_f_avg_std(self.fparam_inv_std, nb) + fparam = (fparam - t_fparam_avg) * t_fparam_inv_std + fparam = paddle.tile(fparam.reshape([nf, 1, -1]), [1, nloc, 1]) + xx = paddle.concat( + [xx, fparam], + axis=-1, + ) + if xx_zeros is not None: + xx_zeros = paddle.concat( + [xx_zeros, fparam], + axis=-1, + ) + # check aparam dim, concate to input descriptor + if self.numb_aparam > 0: + assert aparam is not None, "aparam should not be None" + assert self.aparam_avg is not None + assert self.aparam_inv_std is not None + if aparam.shape[-1] != self.numb_aparam: + raise ValueError( + f"get an input aparam of dim {aparam.shape[-1]}, ", + f"which is not consistent with {self.numb_aparam}.", + ) + aparam = aparam.reshape([nf, -1, self.numb_aparam]) + nb, nloc, _ = aparam.shape + t_aparam_avg = self._extend_a_avg_std(self.aparam_avg, nb, nloc) + t_aparam_inv_std = self._extend_a_avg_std(self.aparam_inv_std, nb, nloc) + aparam = (aparam - t_aparam_avg) * t_aparam_inv_std + xx = paddle.concat( + [xx, aparam], + axis=-1, + ) + if xx_zeros is not None: + xx_zeros = paddle.concat( + [xx_zeros, aparam], + axis=-1, + ) + + outs = paddle.zeros( + (nf, nloc, net_dim_out), + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ).to(device=descriptor.place) # jit assertion + if self.mixed_types: + atom_property = self.filter_layers.networks[0](xx) + self.bias_atom_e[atype] + if xx_zeros is not None: + atom_property -= self.filter_layers.networks[0](xx_zeros) + 
outs = outs + atom_property # Shape is [nframes, natoms[0], net_dim_out] + else: + for type_i, ll in enumerate(self.filter_layers.networks): + mask = (atype == type_i).unsqueeze(-1) + mask.stop_gradient = True + mask = paddle.tile(mask, (1, 1, net_dim_out)) + atom_property = ll(xx) + if xx_zeros is not None: + # must assert, otherwise jit is not happy + assert self.remove_vaccum_contribution is not None + if not ( + len(self.remove_vaccum_contribution) > type_i + and not self.remove_vaccum_contribution[type_i] + ): + atom_property -= ll(xx_zeros) + atom_property = atom_property + self.bias_atom_e[type_i] + atom_property = atom_property * mask.astype(atom_property.dtype) + outs = ( + outs + atom_property + ) # Shape is [nframes, natoms[0], net_dim_out] + # nf x nloc + mask = self.emask(atype) + # nf x nloc x nod + outs = outs * mask[:, :, None].astype(outs.dtype) + return {self.var_name: outs.astype(env.GLOBAL_PD_FLOAT_PRECISION)} diff --git a/deepmd/pd/model/task/invar_fitting.py b/deepmd/pd/model/task/invar_fitting.py new file mode 100644 index 0000000000..5a6cad7c2d --- /dev/null +++ b/deepmd/pd/model/task/invar_fitting.py @@ -0,0 +1,181 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import logging +from typing import ( + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel import ( + FittingOutputDef, + OutputVariableDef, + fitting_check_output, +) +from deepmd.pd.model.task.fitting import ( + GeneralFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION +device = env.DEVICE + +log = logging.getLogger(__name__) + + +@GeneralFitting.register("invar") +@fitting_check_output +class InvarFitting(GeneralFitting): + """Construct a fitting net for energy. + + Parameters + ---------- + var_name : str + The atomic property to fit, 'energy', 'dipole', and 'polar'. 
+ ntypes : int + Element count. + dim_descrpt : int + Embedding width per atom. + dim_out : int + The output dimension of the fitting net. + neuron : List[int] + Number of neurons in each hidden layers of the fitting net. + bias_atom_e : paddle.Tensor, optional + Average enery per atom for each element. + resnet_dt : bool + Using time-step in the ResNet construction. + numb_fparam : int + Number of frame parameters. + numb_aparam : int + Number of atomic parameters. + activation_function : str + Activation function. + precision : str + Numerical precision. + mixed_types : bool + If true, use a uniform fitting net for all atom types, otherwise use + different fitting nets for different atom types. + rcond : float, optional + The condition number for the regression of atomic energy. + seed : int, optional + Random seed. + exclude_types: List[int] + Atomic contributions of the excluded atom types are set zero. + atom_ener: List[Optional[paddle.Tensor]], optional + Specifying atomic energy contribution in vacuum. + The value is a list specifying the bias. the elements can be None or np.array of output shape. + For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] + The `set_davg_zero` key in the descrptor should be set. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
+ + """ + + def __init__( + self, + var_name: str, + ntypes: int, + dim_descrpt: int, + dim_out: int, + neuron: list[int] = [128, 128, 128], + bias_atom_e: Optional[paddle.Tensor] = None, + resnet_dt: bool = True, + numb_fparam: int = 0, + numb_aparam: int = 0, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + mixed_types: bool = True, + rcond: Optional[float] = None, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + atom_ener: Optional[list[Optional[paddle.Tensor]]] = None, + type_map: Optional[list[str]] = None, + **kwargs, + ): + self.dim_out = dim_out + self.atom_ener = atom_ener + super().__init__( + var_name=var_name, + ntypes=ntypes, + dim_descrpt=dim_descrpt, + neuron=neuron, + bias_atom_e=bias_atom_e, + resnet_dt=resnet_dt, + numb_fparam=numb_fparam, + numb_aparam=numb_aparam, + activation_function=activation_function, + precision=precision, + mixed_types=mixed_types, + rcond=rcond, + seed=seed, + exclude_types=exclude_types, + remove_vaccum_contribution=None + if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0 + else [x is not None for x in atom_ener], + type_map=type_map, + **kwargs, + ) + + def _net_out_dim(self): + """Set the FittingNet output dim.""" + return self.dim_out + + def serialize(self) -> dict: + data = super().serialize() + data["type"] = "invar" + data["dim_out"] = self.dim_out + data["atom_ener"] = self.atom_ener + return data + + @classmethod + def deserialize(cls, data: dict) -> "GeneralFitting": + data = copy.deepcopy(data) + check_version_compatibility(data.pop("@version", 1), 2, 1) + return super().deserialize(data) + + def output_def(self) -> FittingOutputDef: + return FittingOutputDef( + [ + OutputVariableDef( + self.var_name, + [self.dim_out], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + ] + ) + + def forward( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: 
Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + """Based on embedding net output, alculate total energy. + + Args: + - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.dim_descrpt]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + + Returns + ------- + - `paddle.Tensor`: Total energy with shape [nframes, natoms[0]]. + """ + return self._forward_common(descriptor, atype, gr, g2, h2, fparam, aparam) + + # make jit happy with paddle 2.0.0 + exclude_types: list[int] diff --git a/deepmd/pd/model/task/task.py b/deepmd/pd/model/task/task.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/model/task/task.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/model/task/type_predict.py b/deepmd/pd/model/task/type_predict.py new file mode 100644 index 0000000000..241d4837d5 --- /dev/null +++ b/deepmd/pd/model/task/type_predict.py @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle + +from deepmd.pd.model.network.network import ( + MaskLMHead, +) +from deepmd.pd.model.task import ( + Fitting, +) + + +class TypePredictNet(Fitting): + def __init__(self, feature_dim, ntypes, activation_function="gelu", **kwargs): + """Construct a type predict net. + + Args: + - feature_dim: Input dm. + - ntypes: Numer of types to predict. + - activation_function: Activate function. + """ + super().__init__() + self.feature_dim = feature_dim + self.ntypes = ntypes + self.lm_head = MaskLMHead( + embed_dim=self.feature_dim, + output_dim=ntypes, + activation_fn=activation_function, + weight=None, + ) + + def forward(self, features, masked_tokens: Optional[paddle.Tensor] = None): + """Calculate the predicted logits. + Args: + - features: Input features with shape [nframes, nloc, feature_dim]. 
+ - masked_tokens: Input masked tokens with shape [nframes, nloc]. + + Returns + ------- + - logits: Predicted probs with shape [nframes, nloc, ntypes]. + """ + # [nframes, nloc, ntypes] + logits = self.lm_head(features, masked_tokens=masked_tokens) + return logits diff --git a/deepmd/pd/train/__init__.py b/deepmd/pd/train/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/train/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py new file mode 100644 index 0000000000..72ffdf404c --- /dev/null +++ b/deepmd/pd/train/training.py @@ -0,0 +1,1215 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import functools +import logging +import time +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) +from typing import ( + Any, +) + +import numpy as np +import paddle +import paddle.distributed as dist +from paddle.distributed import ( + fleet, +) +from paddle.io import ( + DataLoader, +) + +from deepmd.common import ( + symlink_prefix_files, +) +from deepmd.loggers.training import ( + format_training_message, + format_training_message_per_task, +) +from deepmd.pd.loss import ( + EnergyStdLoss, + TaskLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils import ( + dp_random, +) +from deepmd.pd.utils.dataloader import ( + BufferedIterator, + get_weighted_sampler, +) +from deepmd.pd.utils.env import ( + CINN, + DEVICE, + JIT, + NUM_WORKERS, + SAMPLER_RECORD, + enable_prim, +) +from deepmd.pd.utils.learning_rate import ( + LearningRateExp, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) +from deepmd.utils.data import ( + DataRequirementItem, +) +from deepmd.utils.path import ( + DPH5Path, +) + +# if paddle.__version__.startswith("2"): +# import paddle._dynamo + + +log = 
logging.getLogger(__name__) + + +class Trainer: + def __init__( + self, + config: dict[str, Any], + training_data, + stat_file_path=None, + validation_data=None, + init_model=None, + restart_model=None, + finetune_model=None, + force_load=False, + shared_links=None, + finetune_links=None, + init_frz_model=None, + ): + """Construct a DeePMD trainer. + + Args: + - config: The Dict-like configuration with training options. + """ + enable_prim(True) + if init_model is not None: + resume_model = init_model + elif restart_model is not None: + resume_model = restart_model + elif finetune_model is not None: + resume_model = finetune_model + else: + resume_model = None + resuming = resume_model is not None + self.restart_training = restart_model is not None + model_params = config["model"] + training_params = config["training"] + self.multi_task = "model_dict" in model_params + self.finetune_links = finetune_links + self.finetune_update_stat = False + self.model_keys = ( + list(model_params["model_dict"]) if self.multi_task else ["Default"] + ) + self.rank = ( + dist.get_rank() if dist.is_available() and dist.is_initialized() else 0 + ) + self.world_size = ( + dist.get_world_size() + if dist.is_available() and dist.is_initialized() + else 1 + ) + self.num_model = len(self.model_keys) + + # Iteration config + self.num_steps = training_params["numb_steps"] + self.disp_file = training_params.get("disp_file", "lcurve.out") + self.disp_freq = training_params.get("disp_freq", 1000) + self.save_ckpt = training_params.get("save_ckpt", "model.ckpt") + self.save_freq = training_params.get("save_freq", 1000) + self.max_ckpt_keep = training_params.get("max_ckpt_keep", 5) + self.display_in_training = training_params.get("disp_training", True) + self.timing_in_training = training_params.get("time_training", True) + self.change_bias_after_training = training_params.get( + "change_bias_after_training", False + ) + self.lcurve_should_print_header = True + + def get_opt_param(params): + 
opt_type = params.get("opt_type", "Adam") + opt_param = { + "kf_blocksize": params.get("kf_blocksize", 5120), + "kf_start_pref_e": params.get("kf_start_pref_e", 1), + "kf_limit_pref_e": params.get("kf_limit_pref_e", 1), + "kf_start_pref_f": params.get("kf_start_pref_f", 1), + "kf_limit_pref_f": params.get("kf_limit_pref_f", 1), + } + return opt_type, opt_param + + def get_data_loader(_training_data, _validation_data, _training_params): + def get_dataloader_and_buffer(_data, _params): + if "auto_prob" in _training_params["training_data"]: + _sampler = get_weighted_sampler( + _data, _params["training_data"]["auto_prob"] + ) + elif "sys_probs" in _training_params["training_data"]: + _sampler = get_weighted_sampler( + _data, + _params["training_data"]["sys_probs"], + sys_prob=True, + ) + else: + _sampler = get_weighted_sampler(_data, "prob_sys_size") + + if _sampler is None: + log.warning( + "Sampler not specified!" + ) # None sampler will lead to a premature stop iteration. Replacement should be True in attribute of the sampler to produce expected number of items in one iteration. 
+ _dataloader = DataLoader( + _data, + batch_sampler=paddle.io.BatchSampler( + sampler=_sampler, + drop_last=False, + ), + num_workers=NUM_WORKERS + if dist.is_available() + else 0, # setting to 0 diverges the behavior of its iterator; should be >=1 + collate_fn=lambda batch: batch[0], # prevent extra conversion + # pin_memory=True, + ) + _data_buffered = BufferedIterator(iter(_dataloader)) + return _dataloader, _data_buffered + + training_dataloader, training_data_buffered = get_dataloader_and_buffer( + _training_data, _training_params + ) + + if _validation_data is not None: + ( + validation_dataloader, + validation_data_buffered, + ) = get_dataloader_and_buffer(_validation_data, _training_params) + valid_numb_batch = _training_params["validation_data"].get( + "numb_btch", 1 + ) + else: + validation_dataloader = None + validation_data_buffered = None + valid_numb_batch = 1 + return ( + training_dataloader, + training_data_buffered, + validation_dataloader, + validation_data_buffered, + valid_numb_batch, + ) + + def single_model_stat( + _model, + _data_stat_nbatch, + _training_data, + _validation_data, + _stat_file_path, + _data_requirement, + finetune_has_new_type=False, + ): + _data_requirement += get_additional_data_requirement(_model) + _training_data.add_data_requirement(_data_requirement) + if _validation_data is not None: + _validation_data.add_data_requirement(_data_requirement) + + @functools.lru_cache + def get_sample(): + sampled = make_stat_input( + _training_data.systems, + _training_data.dataloaders, + _data_stat_nbatch, + ) + return sampled + + if (not resuming or finetune_has_new_type) and self.rank == 0: + _model.compute_or_load_stat( + sampled_func=get_sample, + stat_file_path=_stat_file_path, + ) + if isinstance(_stat_file_path, DPH5Path): + _stat_file_path.root.close() + return get_sample + + def get_lr(lr_params): + assert ( + lr_params.get("type", "exp") == "exp" + ), "Only learning rate `exp` is supported!" 
+ lr_params["stop_steps"] = self.num_steps - self.warmup_steps + lr_exp = LearningRateExp(**lr_params) + return lr_exp + + # Optimizer + if self.multi_task and training_params.get("optim_dict", None) is not None: + self.optim_dict = training_params.get("optim_dict") + missing_keys = [ + key for key in self.model_keys if key not in self.optim_dict + ] + assert ( + not missing_keys + ), f"These keys are not in optim_dict: {missing_keys}!" + self.opt_type = {} + self.opt_param = {} + for model_key in self.model_keys: + self.opt_type[model_key], self.opt_param[model_key] = get_opt_param( + self.optim_dict[model_key] + ) + else: + self.opt_type, self.opt_param = get_opt_param(training_params) + + # Model + self.model = get_model_for_wrapper(model_params) + + # Loss + if not self.multi_task: + self.loss = get_loss( + config["loss"], + config["learning_rate"]["start_lr"], + len(model_params["type_map"]), + self.model, + ) + else: + self.loss = {} + for model_key in self.model_keys: + loss_param = config["loss_dict"][model_key] + if config.get("learning_rate_dict", None) is not None: + lr_param = config["learning_rate_dict"][model_key]["start_lr"] + else: + lr_param = config["learning_rate"]["start_lr"] + ntypes = len(model_params["model_dict"][model_key]["type_map"]) + self.loss[model_key] = get_loss( + loss_param, lr_param, ntypes, self.model[model_key] + ) + + # Data + if not self.multi_task: + self.get_sample_func = single_model_stat( + self.model, + model_params.get("data_stat_nbatch", 10), + training_data, + validation_data, + stat_file_path, + self.loss.label_requirement, + finetune_has_new_type=self.finetune_links["Default"].get_has_new_type() + if self.finetune_links is not None + else False, + ) + ( + self.training_dataloader, + self.training_data, + self.validation_dataloader, + self.validation_data, + self.valid_numb_batch, + ) = get_data_loader(training_data, validation_data, training_params) + training_data.print_summary( + "training", + 
to_numpy_array(self.training_dataloader.batch_sampler.sampler.weights), + ) + if validation_data is not None: + validation_data.print_summary( + "validation", + to_numpy_array( + self.validation_dataloader.batch_sampler.sampler.weights + ), + ) + else: + ( + self.training_dataloader, + self.training_data, + self.validation_dataloader, + self.validation_data, + self.valid_numb_batch, + self.get_sample_func, + ) = {}, {}, {}, {}, {}, {} + for model_key in self.model_keys: + self.get_sample_func[model_key] = single_model_stat( + self.model[model_key], + model_params["model_dict"][model_key].get("data_stat_nbatch", 10), + training_data[model_key], + validation_data[model_key], + stat_file_path[model_key], + self.loss[model_key].label_requirement, + finetune_has_new_type=self.finetune_links[ + model_key + ].get_has_new_type() + if self.finetune_links is not None + else False, + ) + ( + self.training_dataloader[model_key], + self.training_data[model_key], + self.validation_dataloader[model_key], + self.validation_data[model_key], + self.valid_numb_batch[model_key], + ) = get_data_loader( + training_data[model_key], + validation_data[model_key], + training_params["data_dict"][model_key], + ) + + training_data[model_key].print_summary( + f"training in {model_key}", + to_numpy_array( + self.training_dataloader[ + model_key + ].batch_sampler.sampler.weights + ), + ) + if ( + validation_data is not None + and validation_data[model_key] is not None + ): + validation_data[model_key].print_summary( + f"validation in {model_key}", + to_numpy_array( + self.validation_dataloader[ + model_key + ].batch_sampler.sampler.weights + ), + ) + + # Learning rate + self.warmup_steps = training_params.get("warmup_steps", 0) + self.gradient_max_norm = training_params.get("gradient_max_norm", 0.0) + assert ( + self.num_steps - self.warmup_steps > 0 or self.warmup_steps == 0 + ), "Warm up steps must be less than total training steps!" 
+ if self.multi_task and config.get("learning_rate_dict", None) is not None: + self.lr_exp = {} + for model_key in self.model_keys: + self.lr_exp[model_key] = get_lr(config["learning_rate_dict"][model_key]) + else: + self.lr_exp = get_lr(config["learning_rate"]) + + # JIT + if JIT: + raise NotImplementedError( + "JIT is not supported yet when training with Paddle" + ) + self.model = paddle.jit.to_static(self.model) + + # Model Wrapper + self.wrapper = ModelWrapper(self.model, self.loss, model_params=model_params) + self.start_step = 0 + + # resuming and finetune + optimizer_state_dict = None + if resuming: + log.info(f"Resuming from {resume_model}.") + state_dict = paddle.load(resume_model) + if "model" in state_dict: + optimizer_state_dict = ( + state_dict["optimizer"] if finetune_model is None else None + ) + state_dict = state_dict["model"] + self.start_step = ( + state_dict["_extra_state"]["train_infos"]["step"] + if self.restart_training + else 0 + ) + if self.rank == 0: + if force_load: + input_keys = list(state_dict.keys()) + target_keys = list(self.wrapper.state_dict().keys()) + missing_keys = [ + item for item in target_keys if item not in input_keys + ] + if missing_keys: + target_state_dict = self.wrapper.state_dict() + slim_keys = [] + for item in missing_keys: + state_dict[item] = target_state_dict[item].clone().detach() + new_key = True + for slim_key in slim_keys: + if slim_key in item: + new_key = False + break + if new_key: + tmp_keys = ".".join(item.split(".")[:3]) + slim_keys.append(tmp_keys) + slim_keys = [i + ".*" for i in slim_keys] + log.warning( + f"Force load mode allowed! 
These keys are not in ckpt and will re-init: {slim_keys}" + ) + # update model params in the pretrained model + if finetune_model is not None: + new_state_dict = {} + target_state_dict = self.wrapper.state_dict() + # pretrained_model + pretrained_model = get_model_for_wrapper( + state_dict["_extra_state"]["model_params"] + ) + pretrained_model_wrapper = ModelWrapper(pretrained_model) + pretrained_model_wrapper.set_state_dict(state_dict) + # update type related params + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + _model_key_from = finetune_rule_single.get_model_branch() + # skip if updated + if ( + finetune_rule_single.get_finetune_tmap() + != pretrained_model_wrapper.model[ + _model_key_from + ].get_type_map() + ): + model_with_new_type_stat = None + if finetune_rule_single.get_has_new_type(): + self.finetune_update_stat = True + model_with_new_type_stat = self.wrapper.model[model_key] + pretrained_model_wrapper.model[ + _model_key_from + ].change_type_map( + finetune_rule_single.get_finetune_tmap(), + model_with_new_type_stat=model_with_new_type_stat, + ) + state_dict = pretrained_model_wrapper.state_dict() + + def collect_single_finetune_params( + _model_key, + _finetune_rule_single, + _new_state_dict, + _origin_state_dict, + _random_state_dict, + ): + _new_fitting = _finetune_rule_single.get_random_fitting() + _model_key_from = _finetune_rule_single.get_model_branch() + target_keys = [ + i + for i in _random_state_dict.keys() + if i != "_extra_state" and f".{_model_key}." in i + ] + for item_key in target_keys: + if _new_fitting and (".descriptor." not in item_key): + # print(f'Keep {item_key} in old model!') + _new_state_dict[item_key] = ( + _random_state_dict[item_key].clone().detach() + ) + else: + new_key = item_key.replace( + f".{_model_key}.", f".{_model_key_from}." 
+ ) + # print(f'Replace {item_key} with {new_key} in pretrained_model!') + _new_state_dict[item_key] = ( + _origin_state_dict[new_key].clone().detach() + ) + + # collect model params from the pretrained model + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + collect_single_finetune_params( + model_key, + finetune_rule_single, + new_state_dict, + state_dict, + target_state_dict, + ) + state_dict = new_state_dict + state_dict["_extra_state"] = self.wrapper.state_dict()[ + "_extra_state" + ] + + self.wrapper.set_state_dict(state_dict) + + # change bias for fine-tuning + if finetune_model is not None: + + def single_model_finetune( + _model, + _finetune_rule_single, + _sample_func, + ): + _model = model_change_out_bias( + _model, + _sample_func, + _bias_adjust_mode="change-by-statistic" + if not _finetune_rule_single.get_random_fitting() + else "set-by-statistic", + ) + return _model + + if not self.multi_task: + finetune_rule_single = self.finetune_links["Default"] + self.model = single_model_finetune( + self.model, finetune_rule_single, self.get_sample_func + ) + else: + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + if not finetune_rule_single.get_resuming(): + log.info( + f"Model branch {model_key} will be fine-tuned. This may take a long time..." + ) + self.model[model_key] = single_model_finetune( + self.model[model_key], + finetune_rule_single, + self.get_sample_func[model_key], + ) + else: + log.info( + f"Model branch {model_key} will resume training." 
+ ) + + if init_frz_model is not None: + frz_model = paddle.jit.load(init_frz_model) + self.model.set_state_dict(frz_model.state_dict()) + + # Multi-task share params + if shared_links is not None: + self.wrapper.share_params( + shared_links, + resume=(resuming and not self.finetune_update_stat) or self.rank != 0, + ) + + # TODO add lr warmups for multitask + # author: iProzd + def warm_up_linear(step, warmup_steps): + if step < warmup_steps: + return step / warmup_steps + else: + return self.lr_exp.value(step - warmup_steps) / self.lr_exp.start_lr + + # TODO add optimizers for multitask + # author: iProzd + if self.opt_type == "Adam": + self.scheduler = paddle.optimizer.lr.LambdaDecay( + learning_rate=self.lr_exp.start_lr, + lr_lambda=lambda step: warm_up_linear( + step + self.start_step, self.warmup_steps + ), + ) + self.optimizer = paddle.optimizer.Adam( + learning_rate=self.scheduler, parameters=self.wrapper.parameters() + ) + if optimizer_state_dict is not None and self.restart_training: + self.optimizer.set_state_dict(optimizer_state_dict) + else: + raise ValueError(f"Not supported optimizer type '{self.opt_type}'") + + if dist.is_available() and dist.is_initialized(): + # DDP will guarantee the model parameters are identical across all processes + self.wrapper = fleet.distributed_model( + self.wrapper, + # find_unused_parameters=True, + ) + self.optimizer = fleet.distributed_optimizer(self.optimizer) + + if CINN: + from paddle import ( + static, + ) + + build_strategy = static.BuildStrategy() + build_strategy.build_cinn_pass = CINN + self.wrapper.forward = paddle.jit.to_static( + self.wrapper.forward, + build_strategy=build_strategy, + full_graph=True, + )(self.wrapper.forward) + + # Get model prob for multi-task + if self.multi_task: + self.model_prob = np.array([0.0 for key in self.model_keys]) + if training_params.get("model_prob", None) is not None: + model_prob = training_params["model_prob"] + for ii, model_key in enumerate(self.model_keys): + if 
model_key in model_prob: + self.model_prob[ii] += float(model_prob[model_key]) + else: + for ii, model_key in enumerate(self.model_keys): + self.model_prob[ii] += float(len(self.training_data[model_key])) + sum_prob = np.sum(self.model_prob) + assert sum_prob > 0.0, "Sum of model prob must be larger than 0!" + self.model_prob = self.model_prob / sum_prob + + # Tensorboard + self.enable_tensorboard = training_params.get("tensorboard", False) + self.tensorboard_log_dir = training_params.get("tensorboard_log_dir", "log") + self.tensorboard_freq = training_params.get("tensorboard_freq", 1) + self.enable_profiler = training_params.get("enable_profiler", False) + self.profiling = training_params.get("profiling", False) + self.profiling_file = training_params.get("profiling_file", "timeline.json") + + def run(self): + fout = ( + open( + self.disp_file, + mode="w" if not self.restart_training else "a", + buffering=1, + ) + if self.rank == 0 + else None + ) # line buffered + if SAMPLER_RECORD: + record_file = f"Sample_rank_{self.rank}.txt" + fout1 = open(record_file, mode="w", buffering=1) + log.info("Start to train %d steps.", self.num_steps) + if dist.is_available() and dist.is_initialized(): + log.info(f"Rank: {dist.get_rank()}/{dist.get_world_size()}") + if self.enable_tensorboard: + from tensorboardX import ( + SummaryWriter, + ) + + writer = SummaryWriter(log_dir=self.tensorboard_log_dir) + if self.enable_profiler or self.profiling: + prof = paddle.profiler.profile( + schedule=paddle.profiler.schedule(wait=1, warmup=1, active=3, repeat=1), + on_trace_ready=paddle.profiler.tensorboard_trace_handler( + self.tensorboard_log_dir + ) + if self.enable_profiler + else None, + record_shapes=True, + with_stack=True, + ) + prof.start() + + def step(_step_id, task_key="Default"): + # Paddle Profiler + if self.enable_profiler or self.profiling: + prof.step() + self.wrapper.train() + if isinstance(self.lr_exp, dict): + _lr = self.lr_exp[task_key] + else: + _lr = self.lr_exp + 
cur_lr = _lr.value(_step_id) + pref_lr = cur_lr + self.optimizer.clear_grad(set_to_zero=False) + input_dict, label_dict, log_dict = self.get_data( + is_train=True, task_key=task_key + ) + if SAMPLER_RECORD: + print_str = f"Step {_step_id}: sample system{log_dict['sid']} frame{log_dict['fid']}\n" + fout1.write(print_str) + fout1.flush() + if self.opt_type == "Adam": + cur_lr = self.scheduler.get_lr() + if _step_id < self.warmup_steps: + pref_lr = _lr.start_lr + else: + pref_lr = cur_lr + model_pred, loss, more_loss = self.wrapper( + **input_dict, cur_lr=pref_lr, label=label_dict, task_key=task_key + ) + loss.backward() + if self.gradient_max_norm > 0.0: + grad_norm = paddle.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), self.gradient_max_norm + ) + if not paddle.isfinite(grad_norm).all(): + # check local gradnorm single GPU case, trigger NanDetector + raise FloatingPointError("gradients are Nan/Inf") + self.optimizer.step() + self.scheduler.step() + else: + raise ValueError(f"Not supported optimizer type '{self.opt_type}'") + + # Log and persist + display_step_id = _step_id + 1 + if self.display_in_training and ( + display_step_id % self.disp_freq == 0 or display_step_id == 1 + ): + self.wrapper.eval() + + def log_loss_train(_loss, _more_loss, _task_key="Default"): + results = {} + rmse_val = { + item: _more_loss[item] + for item in _more_loss + if "l2_" not in item + } + for item in sorted(rmse_val.keys()): + results[item] = rmse_val[item] + return results + + def log_loss_valid(_task_key="Default"): + single_results = {} + sum_natoms = 0 + if not self.multi_task: + valid_numb_batch = self.valid_numb_batch + else: + valid_numb_batch = self.valid_numb_batch[_task_key] + for ii in range(valid_numb_batch): + self.optimizer.clear_grad() + input_dict, label_dict, _ = self.get_data( + is_train=False, task_key=_task_key + ) + if input_dict == {}: + # no validation data + return {} + _, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + 
label=label_dict, + task_key=_task_key, + ) + # more_loss.update({"rmse": math.sqrt(loss)}) + natoms = int(input_dict["atype"].shape[-1]) + sum_natoms += natoms + for k, v in more_loss.items(): + if "l2_" not in k: + single_results[k] = ( + single_results.get(k, 0.0) + v * natoms + ) + results = {k: v / sum_natoms for k, v in single_results.items()} + return results + + if not self.multi_task: + train_results = log_loss_train(loss, more_loss) + valid_results = log_loss_valid() + if self.rank == 0: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name="trn", + rmse=train_results, + learning_rate=cur_lr, + ) + ) + if valid_results: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name="val", + rmse=valid_results, + learning_rate=None, + ) + ) + else: + train_results = {_key: {} for _key in self.model_keys} + valid_results = {_key: {} for _key in self.model_keys} + train_results[task_key] = log_loss_train( + loss, more_loss, _task_key=task_key + ) + for _key in self.model_keys: + if _key != task_key: + self.optimizer.clear_grad() + input_dict, label_dict, _ = self.get_data( + is_train=True, task_key=_key + ) + _, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=_key, + ) + train_results[_key] = log_loss_train( + loss, more_loss, _task_key=_key + ) + valid_results[_key] = log_loss_valid(_task_key=_key) + if self.rank == 0: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name=_key + "_trn", + rmse=train_results[_key], + learning_rate=cur_lr, + ) + ) + if valid_results[_key]: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name=_key + "_val", + rmse=valid_results[_key], + learning_rate=None, + ) + ) + + current_time = time.time() + train_time = current_time - self.t0 + self.t0 = current_time + if self.rank == 0 and self.timing_in_training: + log.info( + format_training_message( + 
batch=display_step_id, + wall_time=train_time, + ) + ) + # the first training time is not accurate + if ( + (_step_id + 1 - self.start_step) > self.disp_freq + or self.num_steps - self.start_step < 2 * self.disp_freq + ): + self.total_train_time += train_time + + if fout: + if self.lcurve_should_print_header: + self.print_header(fout, train_results, valid_results) + self.lcurve_should_print_header = False + self.print_on_training( + fout, display_step_id, cur_lr, train_results, valid_results + ) + + if ( + ((_step_id + 1) % self.save_freq == 0 and _step_id != self.start_step) + or (_step_id + 1) == self.num_steps + ) and (self.rank == 0 or dist.get_rank() == 0): + # Handle the case if rank 0 aborted and re-assigned + self.latest_model = Path(self.save_ckpt + f"-{_step_id + 1}.pd") + + module = ( + self.wrapper.module + if dist.is_available() and dist.is_initialized() + else self.wrapper + ) + self.save_model(self.latest_model, lr=cur_lr, step=_step_id) + log.info(f"Saved model to {self.latest_model}") + symlink_prefix_files(self.latest_model.stem, self.save_ckpt) + with open("checkpoint", "w") as f: + f.write(str(self.latest_model)) + + # tensorboard + if self.enable_tensorboard and ( + display_step_id % self.tensorboard_freq == 0 or display_step_id == 1 + ): + writer.add_scalar(f"{task_key}/lr", cur_lr, display_step_id) + writer.add_scalar(f"{task_key}/loss", loss, display_step_id) + for item in more_loss: + writer.add_scalar( + f"{task_key}/{item}", more_loss[item].item(), _step_id + ) + + self.t0 = time.time() + self.total_train_time = 0.0 + for step_id in range(self.num_steps): + if step_id < self.start_step: + continue + if self.multi_task: + chosen_index_list = dp_random.choice( + np.arange( + self.num_model, dtype=np.int32 + ), # int32 should be enough for # models... 
+ p=np.array(self.model_prob), + size=self.world_size, + replace=True, + ) + assert chosen_index_list.size == self.world_size + model_index = chosen_index_list[self.rank] + model_key = self.model_keys[model_index] + else: + model_key = "Default" + step(step_id, model_key) + if JIT: + break + + if self.change_bias_after_training and (self.rank == 0 or dist.get_rank() == 0): + if not self.multi_task: + self.model = model_change_out_bias( + self.model, + self.get_sample_func, + _bias_adjust_mode="change-by-statistic", + ) + else: + for model_key in self.model_keys: + self.model[model_key] = model_change_out_bias( + self.model[model_key], + self.get_sample_func[model_key], + _bias_adjust_mode="change-by-statistic", + ) + self.latest_model = Path(self.save_ckpt + f"-{self.num_steps}.pd") + cur_lr = self.lr_exp.value(self.num_steps - 1) + self.save_model(self.latest_model, lr=cur_lr, step=self.num_steps - 1) + log.info(f"Saved model to {self.latest_model}") + symlink_prefix_files(self.latest_model.stem, self.save_ckpt) + with open("checkpoint", "w") as f: + f.write(str(self.latest_model)) + + if ( + self.rank == 0 or dist.get_rank() == 0 + ): # Handle the case if rank 0 aborted and re-assigned + if self.num_steps == 0: + # when num_steps is 0, the checkpoint is never not saved + self.latest_model = Path(self.save_ckpt + "-0.pd") + self.save_model(self.latest_model, lr=0, step=0) + log.info(f"Saved model to {self.latest_model}") + symlink_prefix_files(self.latest_model.stem, self.save_ckpt) + with open("checkpoint", "w") as f: + f.write(str(self.latest_model)) + + elapsed_batch = self.num_steps - self.start_step + if self.timing_in_training and elapsed_batch // self.disp_freq > 0: + if self.start_step >= 2 * self.disp_freq: + log.info( + "average training time: %.4f s/batch (exclude first %d batches)", + self.total_train_time + / ( + elapsed_batch // self.disp_freq * self.disp_freq + - self.disp_freq + ), + self.disp_freq, + ) + else: + log.info( + "average training time: 
%.4f s/batch", + self.total_train_time + / (elapsed_batch // self.disp_freq * self.disp_freq), + ) + + if JIT: + raise NotImplementedError( + "Paddle JIT saving during training is not supported yet." + ) + log.info(f"Trained model has been saved to: {self.save_ckpt}") + + if fout: + fout.close() + if SAMPLER_RECORD: + fout1.close() + if self.enable_tensorboard: + writer.close() + if self.enable_profiler or self.profiling: + prof.stop() + if self.profiling: + prof.export_chrome_trace(self.profiling_file) + log.info( + f"The profiling trace have been saved to: {self.profiling_file}" + ) + + def save_model(self, save_path: Path, lr=0.0, step=0): + module = ( + self.wrapper.module + if dist.is_available() and dist.is_initialized() + else self.wrapper + ) + module.train_infos["lr"] = lr + module.train_infos["step"] = step + paddle.save( + {"model": module.state_dict(), "optimizer": self.optimizer.state_dict()}, + str(save_path), + ) + checkpoint_dir = save_path.parent + checkpoint_files = [ + f + for f in checkpoint_dir.glob("*.pd") + if not f.is_symlink() and f.name.startswith(self.save_ckpt) + ] + if len(checkpoint_files) > self.max_ckpt_keep: + checkpoint_files.sort(key=lambda x: x.stat().st_mtime) + checkpoint_files[0].unlink() + + def get_data(self, is_train=True, task_key="Default"): + if not self.multi_task: + if is_train: + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator( + iter(self.training_dataloader) + ) + batch_data = next(iter(self.training_data)) + else: + if self.validation_data is None: + return {}, {}, {} + try: + batch_data = next(iter(self.validation_data)) + except StopIteration: + self.validation_data = BufferedIterator( + iter(self.validation_dataloader) + ) + batch_data = next(iter(self.validation_data)) + else: + if is_train: + try: + batch_data = next(iter(self.training_data[task_key])) + except StopIteration: + 
# Refresh the status of the dataloader to start from a new epoch + self.training_data[task_key] = BufferedIterator( + iter(self.training_dataloader[task_key]) + ) + batch_data = next(iter(self.training_data[task_key])) + else: + if self.validation_data[task_key] is None: + return {}, {}, {} + try: + batch_data = next(iter(self.validation_data[task_key])) + except StopIteration: + self.validation_data[task_key] = BufferedIterator( + iter(self.validation_dataloader[task_key]) + ) + batch_data = next(iter(self.validation_data[task_key])) + + for key in batch_data.keys(): + if key == "sid" or key == "fid" or key == "box" or "find_" in key: + continue + elif not isinstance(batch_data[key], list): + if batch_data[key] is not None: + batch_data[key] = batch_data[key].to(DEVICE) + else: + batch_data[key] = [item.to(DEVICE) for item in batch_data[key]] + # we may need a better way to classify which are inputs and which are labels + # now wrapper only supports the following inputs: + input_keys = [ + "coord", + "atype", + "spin", + "box", + "fparam", + "aparam", + ] + input_dict = {item_key: None for item_key in input_keys} + label_dict = {} + for item_key in batch_data: + if item_key in input_keys: + input_dict[item_key] = batch_data[item_key] + else: + if item_key not in ["sid", "fid"]: + label_dict[item_key] = batch_data[item_key] + log_dict = {} + if "fid" in batch_data: + log_dict["fid"] = batch_data["fid"] + log_dict["sid"] = batch_data["sid"] + return input_dict, label_dict, log_dict + + def print_header(self, fout, train_results, valid_results): + train_keys = sorted(train_results.keys()) + print_str = "" + print_str += "# %5s" % "step" + if not self.multi_task: + if valid_results: + prop_fmt = " %11s %11s" + for k in train_keys: + print_str += prop_fmt % (k + "_val", k + "_trn") + else: + prop_fmt = " %11s" + for k in train_keys: + print_str += prop_fmt % (k + "_trn") + else: + for model_key in self.model_keys: + if valid_results[model_key]: + prop_fmt = " %11s 
%11s" + for k in sorted(train_results[model_key].keys()): + print_str += prop_fmt % ( + k + f"_val_{model_key}", + k + f"_trn_{model_key}", + ) + else: + prop_fmt = " %11s" + for k in sorted(train_results[model_key].keys()): + print_str += prop_fmt % (k + f"_trn_{model_key}") + print_str += " %8s\n" % "lr" + print_str += "# If there is no available reference data, rmse_*_{val,trn} will print nan\n" + fout.write(print_str) + fout.flush() + + def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results): + train_keys = sorted(train_results.keys()) + print_str = "" + print_str += "%7d" % step_id + if not self.multi_task: + if valid_results: + prop_fmt = " %11.2e %11.2e" + for k in train_keys: + print_str += prop_fmt % (valid_results[k], train_results[k]) + else: + prop_fmt = " %11.2e" + for k in train_keys: + print_str += prop_fmt % (train_results[k]) + else: + for model_key in self.model_keys: + if valid_results[model_key]: + prop_fmt = " %11.2e %11.2e" + for k in sorted(valid_results[model_key].keys()): + print_str += prop_fmt % ( + valid_results[model_key][k], + train_results[model_key][k], + ) + else: + prop_fmt = " %11.2e" + for k in sorted(train_results[model_key].keys()): + print_str += prop_fmt % (train_results[model_key][k]) + print_str += f" {cur_lr:8.1e}\n" + fout.write(print_str) + fout.flush() + + +def get_additional_data_requirement(_model): + additional_data_requirement = [] + if _model.get_dim_fparam() > 0: + fparam_requirement_items = [ + DataRequirementItem( + "fparam", _model.get_dim_fparam(), atomic=False, must=True + ) + ] + additional_data_requirement += fparam_requirement_items + if _model.get_dim_aparam() > 0: + aparam_requirement_items = [ + DataRequirementItem( + "aparam", _model.get_dim_aparam(), atomic=True, must=True + ) + ] + additional_data_requirement += aparam_requirement_items + has_spin = getattr(_model, "has_spin", False) + if callable(has_spin): + has_spin = has_spin() + if has_spin: + spin_requirement_items = [ 
+ DataRequirementItem("spin", ndof=3, atomic=True, must=True) + ] + additional_data_requirement += spin_requirement_items + return additional_data_requirement + + +def get_loss(loss_params, start_lr, _ntypes, _model): + loss_type = loss_params.get("type", "ener") + if loss_type == "ener": + loss_params["starter_learning_rate"] = start_lr + return EnergyStdLoss(**loss_params) + else: + loss_params["starter_learning_rate"] = start_lr + return TaskLoss.get_class_by_type(loss_type).get_loss(loss_params) + + +def get_single_model( + _model_params, +): + model = get_model(deepcopy(_model_params)).to(DEVICE) + return model + + +def get_model_for_wrapper(_model_params): + if "model_dict" not in _model_params: + _model = get_single_model( + _model_params, + ) + else: + _model = {} + model_keys = list(_model_params["model_dict"]) + for _model_key in model_keys: + _model[_model_key] = get_single_model( + _model_params["model_dict"][_model_key], + ) + return _model + + +def model_change_out_bias( + _model, + _sample_func, + _bias_adjust_mode="change-by-statistic", +): + old_bias = deepcopy(_model.get_out_bias()) + _model.change_out_bias( + _sample_func, + bias_adjust_mode=_bias_adjust_mode, + ) + new_bias = deepcopy(_model.get_out_bias()) + + model_type_map = _model.get_type_map() + log.info( + f"Change output bias of {model_type_map!s} " + f"from {to_numpy_array(old_bias).reshape(-1)!s} " + f"to {to_numpy_array(new_bias).reshape(-1)!s}." 
+ ) + return _model diff --git a/deepmd/pd/train/wrapper.py b/deepmd/pd/train/wrapper.py new file mode 100644 index 0000000000..7c07cbf675 --- /dev/null +++ b/deepmd/pd/train/wrapper.py @@ -0,0 +1,222 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from __future__ import ( + annotations, +) + +import logging +from collections import ( + OrderedDict, +) +from typing import ( + Union, +) + +import paddle + +_StateDict = Union[dict[str, paddle.Tensor], OrderedDict[str, paddle.Tensor]] + +# if paddle.__version__.startswith("2"): +# import paddle._dynamo + + +log = logging.getLogger(__name__) + + +class ModelWrapper(paddle.nn.Layer): + def __init__( + self, + model: paddle.nn.Layer | dict, + loss: paddle.nn.Layer | dict = None, + model_params=None, + shared_links=None, + ): + """Construct a DeePMD model wrapper. + + Args: + - config: The Dict-like configuration with training options. + """ + super().__init__() + self.model_params = model_params if model_params is not None else {} + self.train_infos = { + "lr": 0, + "step": 0, + } + self.multi_task = False + self.model = paddle.nn.LayerDict() + # Model + if isinstance(model, paddle.nn.Layer): + self.model["Default"] = model + elif isinstance(model, dict): + self.multi_task = True + for task_key in model: + assert isinstance( + model[task_key], paddle.nn.Layer + ), f"{task_key} in model_dict is not a paddle.nn.Layer!" + self.model[task_key] = model[task_key] + # Loss + self.loss = None + if loss is not None: + self.loss = paddle.nn.LayerDict() + if isinstance(loss, paddle.nn.Layer): + self.loss["Default"] = loss + elif isinstance(loss, dict): + for task_key in loss: + assert isinstance( + loss[task_key], paddle.nn.Layer + ), f"{task_key} in loss_dict is not a paddle.nn.Layer!" + self.loss[task_key] = loss[task_key] + self.inference_only = self.loss is None + + def share_params(self, shared_links, resume=False): + """ + Share the parameters of classes following rules defined in shared_links during multitask training. 
+ If not start from checkpoint (resume is False), + some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + supported_types = ["descriptor", "fitting_net"] + for shared_item in shared_links: + class_name = shared_links[shared_item]["type"] + shared_base = shared_links[shared_item]["links"][0] + class_type_base = shared_base["shared_type"] + model_key_base = shared_base["model_key"] + shared_level_base = shared_base["shared_level"] + if "descriptor" in class_type_base: + if class_type_base == "descriptor": + base_class = self.model[model_key_base].get_descriptor() + elif "hybrid" in class_type_base: + hybrid_index = int(class_type_base.split("_")[-1]) + base_class = ( + self.model[model_key_base] + .get_descriptor() + .descriptor_list[hybrid_index] + ) + else: + raise RuntimeError(f"Unknown class_type {class_type_base}!") + for link_item in shared_links[shared_item]["links"][1:]: + class_type_link = link_item["shared_type"] + model_key_link = link_item["model_key"] + shared_level_link = int(link_item["shared_level"]) + assert ( + shared_level_link >= shared_level_base + ), "The shared_links must be sorted by shared_level!" + assert ( + "descriptor" in class_type_link + ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" + if class_type_link == "descriptor": + link_class = self.model[model_key_link].get_descriptor() + elif "hybrid" in class_type_link: + hybrid_index = int(class_type_link.split("_")[-1]) + link_class = ( + self.model[model_key_link] + .get_descriptor() + .descriptor_list[hybrid_index] + ) + else: + raise RuntimeError(f"Unknown class_type {class_type_link}!") + link_class.share_params( + base_class, shared_level_link, resume=resume + ) + log.warning( + f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" 
+ ) + else: + if hasattr(self.model[model_key_base], class_type_base): + base_class = self.model[model_key_base].__getattr__(class_type_base) + for link_item in shared_links[shared_item]["links"][1:]: + class_type_link = link_item["shared_type"] + model_key_link = link_item["model_key"] + shared_level_link = int(link_item["shared_level"]) + assert ( + shared_level_link >= shared_level_base + ), "The shared_links must be sorted by shared_level!" + assert ( + class_type_base == class_type_link + ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" + link_class = self.model[model_key_link].__getattr__( + class_type_link + ) + link_class.share_params( + base_class, shared_level_link, resume=resume + ) + log.warning( + f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" + ) + + def forward( + self, + coord, + atype, + spin: paddle.Tensor | None = None, + box: paddle.Tensor | None = None, + cur_lr: paddle.Tensor | None = None, + label: paddle.Tensor | None = None, + task_key: paddle.Tensor | None = None, + inference_only=False, + do_atomic_virial=False, + fparam: paddle.Tensor | None = None, + aparam: paddle.Tensor | None = None, + ): + if not self.multi_task: + task_key = "Default" + else: + assert ( + task_key is not None + ), f"Multitask model must specify the inference task! Supported tasks are {list(self.model.keys())}." 
+ input_dict = { + "coord": coord, + "atype": atype, + "box": box, + "do_atomic_virial": do_atomic_virial, + "fparam": fparam, + "aparam": aparam, + } + has_spin = getattr(self.model[task_key], "has_spin", False) + if callable(has_spin): + has_spin = has_spin() + if has_spin: + input_dict["spin"] = spin + + if self.inference_only or inference_only: + model_pred = self.model[task_key](**input_dict) + return model_pred, None, None + else: + natoms = atype.shape[-1] + model_pred, loss, more_loss = self.loss[task_key]( + input_dict, + self.model[task_key], + label, + natoms=natoms, + learning_rate=cur_lr, + ) + return model_pred, loss, more_loss + + def load_state_dict( + self, + state_dict: _StateDict, + ) -> tuple[list[str], list[str]]: + self.set_extra_state(state_dict.pop("_extra_state")) + return super().set_state_dict(state_dict) + + def set_state_dict( + self, + state_dict: _StateDict, + ) -> tuple[list[str], list[str]]: + return self.load_state_dict(state_dict) + + def state_dict(self): + state_dict = super().state_dict() + extra_state = self.get_extra_state() + state_dict.update({"_extra_state": extra_state}) + return state_dict + + def set_extra_state(self, extra_state: dict): + self.model_params = extra_state["model_params"] + self.train_infos = extra_state["train_infos"] + return None + + def get_extra_state(self) -> dict: + extra_state = { + "model_params": self.model_params, + "train_infos": self.train_infos, + } + return extra_state diff --git a/deepmd/pd/utils/__init__.py b/deepmd/pd/utils/__init__.py new file mode 100644 index 0000000000..7e1043eda4 --- /dev/null +++ b/deepmd/pd/utils/__init__.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +from .exclude_mask import ( + AtomExcludeMask, + PairExcludeMask, +) + +__all__ = [ + "PairExcludeMask", + "AtomExcludeMask", +] diff --git a/deepmd/pd/utils/ase_calc.py b/deepmd/pd/utils/ase_calc.py new file mode 100644 index 0000000000..6bcb9cdc5e --- /dev/null +++ 
b/deepmd/pd/utils/ase_calc.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.calculator import DP as DPCalculator + +__all__ = [ + "DPCalculator", +] diff --git a/deepmd/pd/utils/auto_batch_size.py b/deepmd/pd/utils/auto_batch_size.py new file mode 100644 index 0000000000..8cdb5ddea2 --- /dev/null +++ b/deepmd/pd/utils/auto_batch_size.py @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import paddle + +from deepmd.utils.batch_size import AutoBatchSize as AutoBatchSizeBase + + +class AutoBatchSize(AutoBatchSizeBase): + """Auto batch size. + + Parameters + ---------- + initial_batch_size : int, default: 1024 + initial batch size (number of total atoms) when DP_INFER_BATCH_SIZE + is not set + factor : float, default: 2. + increased factor + + """ + + def __init__( + self, + initial_batch_size: int = 1024, + factor: float = 2.0, + ): + super().__init__( + initial_batch_size=initial_batch_size, + factor=factor, + ) + + def is_gpu_available(self) -> bool: + """Check if GPU is available. + + Returns + ------- + bool + True if GPU is available + """ + return paddle.device.cuda.device_count() > 0 + + def is_oom_error(self, e: Exception) -> bool: + """Check if the exception is an OOM error. + + Parameters + ---------- + e : Exception + Exception + """ + # several sources think CUSOLVER_STATUS_INTERNAL_ERROR is another out-of-memory error, + # such as https://github.com/JuliaGPU/CUDA.jl/issues/1924 + # (the meaningless error message should be considered as a bug in cusolver) + if isinstance(e, RuntimeError) and ( + "CUDA out of memory." 
in e.args[0] + or "CUDA driver error: out of memory" in e.args[0] + or "cusolver error: CUSOLVER_STATUS_INTERNAL_ERROR" in e.args[0] + ): + # Release all unoccupied cached memory + # paddle.device.cuda.empty_cache() + return True + return False diff --git a/deepmd/pd/utils/cache.py b/deepmd/pd/utils/cache.py new file mode 100644 index 0000000000..c40c4050b7 --- /dev/null +++ b/deepmd/pd/utils/cache.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy as copy_lib +import functools + + +def lru_cache(maxsize=16, typed=False, copy=False, deepcopy=False): + if deepcopy: + + def decorator(f): + cached_func = functools.lru_cache(maxsize, typed)(f) + + @functools.wraps(f) + def wrapper(*args, **kwargs): + return copy_lib.deepcopy(cached_func(*args, **kwargs)) + + return wrapper + + elif copy: + + def decorator(f): + cached_func = functools.lru_cache(maxsize, typed)(f) + + @functools.wraps(f) + def wrapper(*args, **kwargs): + return copy_lib.copy(cached_func(*args, **kwargs)) + + return wrapper + + else: + decorator = functools.lru_cache(maxsize, typed) + return decorator diff --git a/deepmd/pd/utils/dataloader.py b/deepmd/pd/utils/dataloader.py new file mode 100644 index 0000000000..7a2bf4fe9c --- /dev/null +++ b/deepmd/pd/utils/dataloader.py @@ -0,0 +1,339 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os +import queue +import time +from collections.abc import ( + Iterator, +) +from multiprocessing.dummy import ( + Pool, +) +from threading import ( + Thread, +) + +import h5py +import numpy as np +import paddle +import paddle.distributed as dist + +# import paddle.multiprocessing +from paddle.io import ( + BatchSampler, + DataLoader, + Dataset, + DistributedBatchSampler, + WeightedRandomSampler, +) +from paddle.io.dataloader.collate import ( + default_collate_fn, +) + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.utils.data import ( + 
DataRequirementItem, +) +from deepmd.utils.data_system import ( + print_summary, + prob_sys_size_ext, + process_sys_probs, +) + +log = logging.getLogger(__name__) +# paddle.multiprocessing.set_sharing_strategy("file_system") + + +def setup_seed(seed): + paddle.seed(seed) + os.environ["FLAGS_cudnn_deterministic"] = "True" + + +class DpLoaderSet(Dataset): + """A dataset for storing DataLoaders to multiple Systems. + + Parameters + ---------- + sys_path + Path to the data system + batch_size + Max frame count in a batch. + type_map + Gives the name of different atom types + seed + Random seed for dataloader + shuffle + If the data are shuffled (Only effective in serial mode. Always shuffle in distributed data parallelism) + """ + + def __init__( + self, + systems, + batch_size, + type_map, + seed=None, + shuffle=True, + ): + if seed is not None: + setup_seed(seed) + if isinstance(systems, str): + with h5py.File(systems) as file: + systems = [os.path.join(systems, item) for item in file.keys()] + + self.systems: list[DeepmdDataSetForLoader] = [] + if len(systems) >= 100: + log.info(f"Constructing DataLoaders from {len(systems)} systems") + + def construct_dataset(system): + return DeepmdDataSetForLoader( + system=system, + type_map=type_map, + ) + + MAX_PROCESSES_NUM = 4 + processes = min( + os.cpu_count() + // ( + dist.get_world_size() + if dist.is_available() and dist.is_initialized() + else 1 + ), + MAX_PROCESSES_NUM, + ) + with Pool(processes) as pool: + self.systems = pool.map(construct_dataset, systems) + + self.sampler_list: list[DistributedBatchSampler] = [] + self.index = [] + self.total_batch = 0 + + self.dataloaders = [] + self.batch_sizes = [] + if isinstance(batch_size, str): + if batch_size == "auto": + rule = 32 + elif batch_size.startswith("auto:"): + rule = int(batch_size.split(":")[1]) + else: + rule = None + log.error("Unsupported batch size type") + for ii in self.systems: + ni = ii._natoms + bsi = rule // ni + if bsi * ni < rule: + bsi += 1 + 
self.batch_sizes.append(bsi) + elif isinstance(batch_size, list): + self.batch_sizes = batch_size + else: + self.batch_sizes = batch_size * np.ones(len(systems), dtype=int) + assert len(self.systems) == len(self.batch_sizes) + for system, batch_size in zip(self.systems, self.batch_sizes): + if dist.is_available() and dist.is_initialized(): + system_batch_sampler = DistributedBatchSampler( + system, + shuffle=( + (not (dist.is_available() and dist.is_initialized())) + and shuffle + ), + batch_size=int(batch_size), + ) + self.sampler_list.append(system_batch_sampler) + else: + system_batch_sampler = BatchSampler( + system, + shuffle=( + (not (dist.is_available() and dist.is_initialized())) + and shuffle + ), + batch_size=int(batch_size), + ) + self.sampler_list.append(system_batch_sampler) + system_dataloader = DataLoader( + dataset=system, + num_workers=0, # Should be 0 to avoid too many threads forked + batch_sampler=system_batch_sampler, + collate_fn=collate_batch, + use_buffer_reader=False, + places=["cpu"], + ) + self.dataloaders.append(system_dataloader) + self.index.append(len(system_dataloader)) + self.total_batch += len(system_dataloader) + + class LazyIter: + """Lazy iterator to prevent fetching data when iter(item).""" + + def __init__(self, item): + self.item = item + + def __iter__(self): + # directly return + return self + + def __next__(self): + if not isinstance(self.item, Iterator): + # make iterator here lazily + self.item = iter(self.item) + return next(self.item) + + self.iters = [] + for item in self.dataloaders: + self.iters.append(LazyIter(item)) + + def set_noise(self, noise_settings): + # noise_settings['noise_type'] # "trunc_normal", "normal", "uniform" + # noise_settings['noise'] # float, default 1.0 + # noise_settings['noise_mode'] # "prob", "fix_num" + # noise_settings['mask_num'] # if "fix_num", int + # noise_settings['mask_prob'] # if "prob", float + # noise_settings['same_mask'] # coord and type same mask? 
+ for system in self.systems: + system.set_noise(noise_settings) + + def __len__(self): + return len(self.dataloaders) + + def __getitem__(self, idx): + # log.warning(str(paddle.distributed.get_rank())+" idx: "+str(idx)+" index: "+str(self.index[idx])) + try: + batch = next(self.iters[idx]) + except StopIteration: + self.iters[idx] = iter(self.dataloaders[idx]) + batch = next(self.iters[idx]) + batch["sid"] = idx + return batch + + def add_data_requirement(self, data_requirement: list[DataRequirementItem]): + """Add data requirement for each system in multiple systems.""" + for system in self.systems: + system.add_data_requirement(data_requirement) + + def print_summary( + self, + name: str, + prob: list[float], + ): + print_summary( + name, + len(self.systems), + [ss.system for ss in self.systems], + [ss._natoms for ss in self.systems], + self.batch_sizes, + [ + ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) + for ii, ss in enumerate(self.systems) + ], + prob, + [ss._data_system.pbc for ss in self.systems], + ) + + +_sentinel = object() +QUEUESIZE = 32 + + +class BackgroundConsumer(Thread): + def __init__(self, queue, source, max_len): + Thread.__init__(self) + self._queue = queue + self._source = source # Main DL iterator + self._max_len = max_len # + + def run(self): + for item in self._source: + self._queue.put(item) # Blocking if the queue is full + + # Signal the consumer we are done. 
+ self._queue.put(_sentinel) + + +class BufferedIterator: + def __init__(self, iterable): + self._queue = queue.Queue(QUEUESIZE) + self._iterable = iterable + self._consumer = None + + self.start_time = time.time() + self.warning_time = None + self.total = len(iterable) + + def _create_consumer(self): + self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total) + self._consumer.daemon = True + self._consumer.start() + + def __iter__(self): + return self + + def __len__(self): + return self.total + + def __next__(self): + # Create consumer if not created yet + if self._consumer is None: + self._create_consumer() + # Notify the user if there is a data loading bottleneck + if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): + if time.time() - self.start_time > 5 * 60: + if ( + self.warning_time is None + or time.time() - self.warning_time > 15 * 60 + ): + log.warning( + "Data loading buffer is empty or nearly empty. This may " + "indicate a data loading bottleneck, and increasing the " + "number of workers (--num-workers) may help." 
+ ) + self.warning_time = time.time() + + # Get next example + item = self._queue.get() + if isinstance(item, Exception): + raise item + if item is _sentinel: + raise StopIteration + return item + + +def collate_batch(batch): + example = batch[0] + result = {} + for key in example.keys(): + if "find_" in key: + result[key] = batch[0][key] + else: + if batch[0][key] is None: + result[key] = None + elif key == "fid": + result[key] = [d[key] for d in batch] + elif key == "type": + continue + else: + result[key] = default_collate_fn([d[key] for d in batch]) + return result + + +def get_weighted_sampler(training_data, prob_style, sys_prob=False): + if sys_prob is False: + if prob_style == "prob_uniform": + prob_v = 1.0 / float(training_data.__len__()) + probs = [prob_v for ii in range(training_data.__len__())] + else: # prob_sys_size;A:B:p1;C:D:p2 or prob_sys_size = prob_sys_size;0:nsys:1.0 + if prob_style == "prob_sys_size": + style = f"prob_sys_size;0:{len(training_data)}:1.0" + else: + style = prob_style + probs = prob_sys_size_ext(style, len(training_data), training_data.index) + else: + probs = process_sys_probs(prob_style, training_data.index) + log.debug("Generated weighted sampler with prob array: " + str(probs)) + # training_data.total_batch is the size of one epoch, you can increase it to avoid too many rebuilding of iteraters + len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1) + sampler = WeightedRandomSampler(probs, len_sampler, replacement=True) + return sampler diff --git a/deepmd/pd/utils/dataset.py b/deepmd/pd/utils/dataset.py new file mode 100644 index 0000000000..1f0533d8fc --- /dev/null +++ b/deepmd/pd/utils/dataset.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + + +from typing import ( + Optional, +) + +from paddle.io import ( + Dataset, +) + +from deepmd.utils.data import ( + DataRequirementItem, + DeepmdData, +) + + +class DeepmdDataSetForLoader(Dataset): + def __init__(self, system: str, type_map: 
Optional[list[str]] = None): + """Construct DeePMD-style dataset containing frames cross different systems. + + Args: + - systems: Paths to systems. + - type_map: Atom types. + """ + self.system = system + self._type_map = type_map + self._data_system = DeepmdData(sys_path=system, type_map=self._type_map) + self.mixed_type = self._data_system.mixed_type + self._ntypes = self._data_system.get_ntypes() + self._natoms = self._data_system.get_natoms() + self._natoms_vec = self._data_system.get_natoms_vec(self._ntypes) + + def __len__(self): + return self._data_system.nframes + + def __getitem__(self, index): + """Get a frame from the selected system.""" + b_data = self._data_system.get_item_paddle(index) + b_data["natoms"] = self._natoms_vec + return b_data + + def add_data_requirement(self, data_requirement: list[DataRequirementItem]): + """Add data requirement for this data system.""" + for data_item in data_requirement: + self._data_system.add( + data_item["key"], + data_item["ndof"], + atomic=data_item["atomic"], + must=data_item["must"], + high_prec=data_item["high_prec"], + type_sel=data_item["type_sel"], + repeat=data_item["repeat"], + default=data_item["default"], + dtype=data_item["dtype"], + output_natoms_for_type_sel=data_item["output_natoms_for_type_sel"], + ) diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py new file mode 100644 index 0000000000..25eac1b6d5 --- /dev/null +++ b/deepmd/pd/utils/decomp.py @@ -0,0 +1,247 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +# This file is used to implement some paddle functions with composite API, +# so as to support high-order differentation when double-backward is needed. +# For example: [norm] --decomposition--> [multiply, power, sum] +# This file will be removed when implmented functions are decomposed into primitive +# function in Paddle framework in the future. 
+
+from __future__ import (
+    annotations,
+)
+
+import paddle
+
+__all__ = [
+    "softmax",
+    "norm",
+    "take_along_axis",
+    "scatter_reduce",
+    "sec",
+    "masked_add_",
+]
+
+
+# decomposition for forward function
+def softmax_decomp(x: paddle.Tensor, axis: int = -1) -> paddle.Tensor:
+    """Forward decomposition function of softmax.
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        Input.
+    axis : int, default: -1
+        A dimension along which softmax will be computed.
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+    """
+    x_max = paddle.max(x, axis=axis, keepdim=True)
+    x = x - x_max
+    return paddle.exp(x) / paddle.sum(paddle.exp(x), axis=axis, keepdim=True)
+
+
+def norm_decomp(
+    x: paddle.Tensor, p: float = 2, axis: bool = -1, keepdim: bool = False
+) -> paddle.Tensor:
+    """Forward decomposition function of norm.
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        Input
+    p : float, default: 2
+        Order of norm
+    axis : bool, default: -1
+        Dimensions over which to compute the vector or matrix norm
+    keepdim : bool, default: False
+        If set to True, the reduced dimensions are retained in the result as dimensions
+        with size one
+
+    Returns
+    -------
+    paddle.Tensor
+        A real-valued tensor, even when x is complex.
+    """
+    if p == 2 or p == 2.0:
+        # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward
+        return (x * x).sum(axis=axis, keepdim=keepdim).clip(1e-12) ** 0.5
+    return (x**p).sum(axis=axis, keepdim=keepdim) ** (1 / p)
+
+
+def take_along_axis_decomp(
+    x: paddle.Tensor, indices: paddle.Tensor, axis: int, broadcast: bool = True
+) -> paddle.Tensor:
+    """Forward decomposition function of take_along_axis.
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        The input tensor.
+    indices : paddle.Tensor
+        Indices to take along each 1d slice of array.
+    axis : int
+        The axis to take 1d slices along.
+    broadcast : bool, default: True
+        Whether the indices broadcast.
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+ """ + # manually contruct indices for gather_nd(ind_gather_nd.ndim == indices.ndim + 1, + # the lsat 1 represents the number of dimension(s) of indices) + ind_gather_nd = paddle.stack( + paddle.meshgrid(*[paddle.arange(v) for v in indices.shape], indexing="ij"), + axis=-1, + ) + ind_gather_nd[..., axis] = indices + # compute output using constructed indices via gather_nd + out = paddle.gather_nd(x, ind_gather_nd) + return out + + +def scatter_reduce_decomp( + input: paddle.Tensor, + axis: int, + index: paddle.Tensor, + src: paddle.Tensor, + reduce: str, +) -> paddle.Tensor: + """Forward decompsition function of scatter_reduce. + + Parameters + ---------- + input : paddle.Tensor + Input tensor. + axis : int + The axis along which to index. + index : paddle.Tensor + The indices of elements to scatter and reduce. + src : paddle.Tensor + The source elements to scatter and reduce. + reduce : str + The reduction operation to apply for non-unique indices. + Supported modes: ("sum", "prod", "mean", "amax", "amin"). + + Returns + ------- + paddle.Tensor + Computed output. + """ + # reduce: "sum", "prod", "mean", "amax", "amin" + if reduce == "sum": + input.put_along_axis_(indices=index, values=src, axis=axis, reduce="add") + elif reduce == "mean": + input.put_along_axis_(indices=index, values=src, axis=axis, reduce="add") + dst_div = paddle.ones_like(input).put_along_axis( + indices=index, + values=paddle.to_tensor(1.0, dtype=input.dtype), + axis=axis, + reduce="add", + ) + input = input / dst_div + elif reduce == "prod": + input = input.put_along_axis(indices=index, values=src, axis=axis, reduce="mul") + else: + raise NotImplementedError("only support mode in ['sum', 'prod', 'mean']!") + return input + + +def sec(length: int, size: int) -> list[int]: + """Auxiliary function for decomposed functions. + + If length is not divisible by size, the last chunk will be smaller. + + Parameters + ---------- + length : int + Length to be chunked. + size : int + Chunk size. 
+
+    Returns
+    -------
+    list[int]
+        Chunked output list.
+    """
+    assert length > 0
+    assert size > 0
+    if length % size == 0:
+        return [size] * (length // size)
+    return [size] * (length // size) + [length % size]
+
+
+def masked_add__decomp(
+    x: paddle.Tensor, mask: paddle.Tensor, v: paddle.Tensor
+) -> paddle.Tensor:
+    """Forward decomposition function of masked_add_(inplace operator).
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        Input tensor.
+    mask : paddle.Tensor
+        Mask tensor.
+    v : paddle.Tensor
+        Value to add.
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+    """
+    assert mask.dtype == paddle.bool, f"mask must be bool type, but got {mask.dtype}"
+    # indices is bool mask
+    mask_coord = paddle.concat(
+        paddle.nonzero(mask, as_tuple=True),
+        axis=1,
+    )  # [nz, dim]
+    if not paddle.is_tensor(v):
+        v = paddle.full([mask_coord.shape[0]], v, dtype=x.dtype)
+    t = paddle.scatter_nd_add(
+        x,
+        mask_coord,
+        v,
+    )
+    paddle.assign(t, x)  # inplace update
+    return x
+
+
+def normalize_decomp(
+    x: paddle.Tensor,
+    p: float = 2,
+    axis: int = 1,
+    epsilon: float = 1e-12,
+) -> paddle.Tensor:
+    """Forward decomposition function of normalize.
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        Input tensor.
+    p : float, optional
+        Order of the norm, default: 2
+    axis : int, optional
+        Axis on which to perform normalization, default: 1
+    epsilon : float, optional
+        Epsilon value, default: 1e-12
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+ """ + return x / (norm(x, p=p, axis=axis, keepdim=True).clip(min=epsilon)) + + +# alias for decomposed functions for convinience +normalize = normalize_decomp +masked_add_ = masked_add__decomp +scatter_reduce = scatter_reduce_decomp +take_along_axis = take_along_axis_decomp +norm = norm_decomp +softmax = softmax_decomp diff --git a/deepmd/pd/utils/dp_random.py b/deepmd/pd/utils/dp_random.py new file mode 100644 index 0000000000..e81488c506 --- /dev/null +++ b/deepmd/pd/utils/dp_random.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.utils.random import ( + choice, + random, + seed, + shuffle, +) + +__all__ = [ + "choice", + "random", + "seed", + "shuffle", +] diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py new file mode 100644 index 0000000000..867deac35b --- /dev/null +++ b/deepmd/pd/utils/env.py @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os + +import numpy as np +import paddle + +from deepmd.common import ( + VALID_PRECISION, +) +from deepmd.env import ( + GLOBAL_ENER_FLOAT_PRECISION, + GLOBAL_NP_FLOAT_PRECISION, + get_default_nthreads, + set_default_nthreads, +) + +SAMPLER_RECORD = os.environ.get("SAMPLER_RECORD", False) +try: + # only linux + ncpus = len(os.sched_getaffinity(0)) +except AttributeError: + ncpus = os.cpu_count() +NUM_WORKERS = int(os.environ.get("NUM_WORKERS", min(0, ncpus))) +# Make sure DDP uses correct device if applicable +LOCAL_RANK = paddle.distributed.get_rank() + +if os.environ.get("DEVICE") == "cpu" or paddle.device.cuda.device_count() <= 0: + DEVICE = "cpu" +else: + DEVICE = f"gpu:{LOCAL_RANK}" + +paddle.device.set_device(DEVICE) + +JIT = False +CINN = False +CACHE_PER_SYS = 5 # keep at most so many sets per sys in memory +ENERGY_BIAS_TRAINABLE = True + +PRECISION_DICT = { + "float16": paddle.float16, + "float32": paddle.float32, + "float64": paddle.float64, + "half": paddle.float16, + "single": paddle.float32, + "double": paddle.float64, + 
"int32": paddle.int32, + "int64": paddle.int64, + "bfloat16": paddle.bfloat16, + "bool": paddle.bool, +} +GLOBAL_PD_FLOAT_PRECISION = PRECISION_DICT[np.dtype(GLOBAL_NP_FLOAT_PRECISION).name] +GLOBAL_PD_ENER_FLOAT_PRECISION = PRECISION_DICT[ + np.dtype(GLOBAL_ENER_FLOAT_PRECISION).name +] +PRECISION_DICT["default"] = GLOBAL_PD_FLOAT_PRECISION +assert VALID_PRECISION.issubset(PRECISION_DICT.keys()) +# cannot automatically generated +RESERVED_PRECISON_DICT = { + paddle.float16: "float16", + paddle.float32: "float32", + paddle.float64: "float64", + paddle.int32: "int32", + paddle.int64: "int64", + paddle.bfloat16: "bfloat16", + paddle.bool: "bool", +} +assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISON_DICT.keys()) +DEFAULT_PRECISION = "float64" + +# throw warnings if threads not set +set_default_nthreads() +inter_nthreads, intra_nthreads = get_default_nthreads() +# if inter_nthreads > 0: # the behavior of 0 is not documented +# paddle.set_num_interop_threads(inter_nthreads) +# if intra_nthreads > 0: +# paddle.framework.core.set_num_threads(intra_nthreads) + + +def enable_prim(enable: bool = True): + """Enable running program in primitive C++ API in eager/static mode.""" + if enable: + from paddle.framework import ( + core, + ) + + core.set_prim_eager_enabled(True) + core._set_prim_all_enabled(True) + log = logging.getLogger(__name__) + log.info("Enable prim in eager and static mode.") + + +__all__ = [ + "GLOBAL_ENER_FLOAT_PRECISION", + "GLOBAL_NP_FLOAT_PRECISION", + "GLOBAL_PD_FLOAT_PRECISION", + "GLOBAL_PD_ENER_FLOAT_PRECISION", + "DEFAULT_PRECISION", + "PRECISION_DICT", + "RESERVED_PRECISON_DICT", + "SAMPLER_RECORD", + "NUM_WORKERS", + "DEVICE", + "JIT", + "CINN", + "CACHE_PER_SYS", + "ENERGY_BIAS_TRAINABLE", + "LOCAL_RANK", +] diff --git a/deepmd/pd/utils/env_mat_stat.py b/deepmd/pd/utils/env_mat_stat.py new file mode 100644 index 0000000000..1cc67ecfee --- /dev/null +++ b/deepmd/pd/utils/env_mat_stat.py @@ -0,0 +1,235 @@ +# SPDX-License-Identifier: 
LGPL-3.0-or-later +from collections.abc import ( + Iterator, +) +from typing import ( + TYPE_CHECKING, + Union, +) + +import numpy as np +import paddle + +from deepmd.common import ( + get_hash, +) +from deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.utils.env_mat_stat import EnvMatStat as BaseEnvMatStat +from deepmd.utils.env_mat_stat import ( + StatItem, +) + +if TYPE_CHECKING: + from deepmd.pd.model.descriptor import ( + DescriptorBlock, + ) + + +class EnvMatStat(BaseEnvMatStat): + def compute_stat(self, env_mat: dict[str, paddle.Tensor]) -> dict[str, StatItem]: + """Compute the statistics of the environment matrix for a single system. + + Parameters + ---------- + env_mat : paddle.Tensor + The environment matrix. + + Returns + ------- + Dict[str, StatItem] + The statistics of the environment matrix. + """ + stats = {} + for kk, vv in env_mat.items(): + stats[kk] = StatItem( + number=vv.numel().item(), + sum=vv.sum().item() if vv.numel().item() != 0 else paddle.zeros([]), + squared_sum=paddle.square(vv).sum().item() + if vv.numel().item() != 0 + else paddle.zeros([]), + ) + return stats + + +class EnvMatStatSe(EnvMatStat): + """Environmental matrix statistics for the se_a/se_r environemntal matrix. + + Parameters + ---------- + descriptor : DescriptorBlock + The descriptor of the model. + """ + + def __init__(self, descriptor: "DescriptorBlock"): + super().__init__() + self.descriptor = descriptor + self.last_dim = ( + self.descriptor.ndescrpt // self.descriptor.nnei + ) # se_r=1, se_a=4 + + def iter( + self, data: list[dict[str, Union[paddle.Tensor, list[tuple[int, int]]]]] + ) -> Iterator[dict[str, StatItem]]: + """Get the iterator of the environment matrix. 
+ + Parameters + ---------- + data : List[Dict[str, Union[paddle.Tensor, List[Tuple[int, int]]]]] + The data. + + Yields + ------ + Dict[str, StatItem] + The statistics of the environment matrix. + """ + zero_mean = paddle.zeros( + [self.descriptor.get_ntypes(), self.descriptor.get_nsel(), self.last_dim], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ).to(env.DEVICE) + one_stddev = paddle.ones( + [self.descriptor.get_ntypes(), self.descriptor.get_nsel(), self.last_dim], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ).to(env.DEVICE) + if self.last_dim == 4: + radial_only = False + elif self.last_dim == 1: + radial_only = True + else: + raise ValueError( + "last_dim should be 1 for raial-only or 4 for full descriptor." + ) + for system in data: + coord, atype, box, natoms = ( + system["coord"], + system["atype"], + system["box"], + system["natoms"], + ) + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + self.descriptor.get_rcut(), + self.descriptor.get_sel(), + mixed_types=self.descriptor.mixed_types(), + box=box, + ) + env_mat, _, _ = prod_env_mat( + extended_coord, + nlist, + atype, + zero_mean, + one_stddev, + self.descriptor.get_rcut(), + self.descriptor.get_rcut_smth(), + radial_only, + protection=self.descriptor.get_env_protection(), + ) + # apply excluded_types + exclude_mask = self.descriptor.emask(nlist, extended_atype) + env_mat *= exclude_mask.unsqueeze(-1).astype(env_mat.dtype) + # reshape to nframes * nloc at the atom level, + # so nframes/mixed_type do not matter + env_mat = env_mat.reshape( + [ + coord.shape[0] * coord.shape[1], + self.descriptor.get_nsel(), + self.last_dim, + ] + ) + atype = atype.reshape([coord.shape[0] * coord.shape[1]]) + # (1, nloc) eq (ntypes, 1), so broadcast is possible + # shape: (ntypes, nloc) + type_idx = paddle.equal( + atype.reshape([1, -1]), + paddle.arange(self.descriptor.get_ntypes(), dtype=atype.dtype) + .to(device=env.DEVICE) + .reshape([-1, 1]), + ) + if 
"pair_exclude_types" in system: + # shape: (1, nloc, nnei) + exclude_mask = PairExcludeMask( + self.descriptor.get_ntypes(), system["pair_exclude_types"] + )(nlist, extended_atype).reshape( + [1, coord.shape[0] * coord.shape[1], -1] + ) + # shape: (ntypes, nloc, nnei) + type_idx = paddle.logical_and(type_idx.unsqueeze(-1), exclude_mask) + for type_i in range(self.descriptor.get_ntypes()): + dd = env_mat[type_idx[type_i]] + dd = dd.reshape([-1, self.last_dim]) # typen_atoms * unmasked_nnei, 4 + env_mats = {} + env_mats[f"r_{type_i}"] = dd[:, :1] + if self.last_dim == 4: + env_mats[f"a_{type_i}"] = dd[:, 1:] + yield self.compute_stat(env_mats) + + def get_hash(self) -> str: + """Get the hash of the environment matrix. + + Returns + ------- + str + The hash of the environment matrix. + """ + dscpt_type = "se_a" if self.last_dim == 4 else "se_r" + return get_hash( + { + "type": dscpt_type, + "ntypes": self.descriptor.get_ntypes(), + "rcut": round(self.descriptor.get_rcut(), 2), + "rcut_smth": round(self.descriptor.rcut_smth, 2), + "nsel": self.descriptor.get_nsel(), + "sel": self.descriptor.get_sel(), + "mixed_types": self.descriptor.mixed_types(), + } + ) + + def __call__(self): + avgs = self.get_avg() + stds = self.get_std() + + all_davg = [] + all_dstd = [] + + for type_i in range(self.descriptor.get_ntypes()): + if self.last_dim == 4: + davgunit = [[avgs[f"r_{type_i}"], 0, 0, 0]] + dstdunit = [ + [ + stds[f"r_{type_i}"], + stds[f"a_{type_i}"], + stds[f"a_{type_i}"], + stds[f"a_{type_i}"], + ] + ] + elif self.last_dim == 1: + davgunit = [[avgs[f"r_{type_i}"]]] + dstdunit = [ + [ + stds[f"r_{type_i}"], + ] + ] + davg = np.tile(davgunit, [self.descriptor.get_nsel(), 1]) + dstd = np.tile(dstdunit, [self.descriptor.get_nsel(), 1]) + all_davg.append(davg) + all_dstd.append(dstd) + + mean = np.stack(all_davg) + stddev = np.stack(all_dstd) + return mean, stddev diff --git a/deepmd/pd/utils/exclude_mask.py b/deepmd/pd/utils/exclude_mask.py new file mode 100644 index 
0000000000..088ac186a8 --- /dev/null +++ b/deepmd/pd/utils/exclude_mask.py @@ -0,0 +1,164 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + decomp, +) +from deepmd.pd.utils.utils import ( + to_paddle_tensor, +) + + +class AtomExcludeMask(paddle.nn.Layer): + """Computes the type exclusion mask for atoms.""" + + def __init__( + self, + ntypes: int, + exclude_types: list[int] = [], + ): + super().__init__() + self.reinit(ntypes, exclude_types) + + def reinit( + self, + ntypes: int, + exclude_types: list[int] = [], + ): + self.ntypes = ntypes + self.exclude_types = exclude_types + self.type_mask = np.array( + [1 if tt_i not in self.exclude_types else 0 for tt_i in range(ntypes)], + dtype=np.int32, + ) + self.type_mask = to_paddle_tensor(self.type_mask).reshape([-1]) + + def get_exclude_types(self): + return self.exclude_types + + def get_type_mask(self): + return self.type_mask + + def forward( + self, + atype: paddle.Tensor, + ) -> paddle.Tensor: + """Compute type exclusion mask for atoms. + + Parameters + ---------- + atype + The extended atom types. shape: nf x natom + + Returns + ------- + mask + The type exclusion mask for atoms. shape: nf x natom + Element [ff,ii] being 0 if type(ii) is excluded, + otherwise being 1. 
+ + """ + nf, natom = atype.shape + return self.type_mask[atype].reshape([nf, natom]).to(atype.place) + + +class PairExcludeMask(paddle.nn.Layer): + """Computes the type exclusion mask for atom pairs.""" + + def __init__( + self, + ntypes: int, + exclude_types: list[tuple[int, int]] = [], + ): + super().__init__() + self.reinit(ntypes, exclude_types) + + def reinit( + self, + ntypes: int, + exclude_types: list[tuple[int, int]] = [], + ): + self.ntypes = ntypes + self._exclude_types: set[tuple[int, int]] = set() + for tt in exclude_types: + assert len(tt) == 2 + self._exclude_types.add((tt[0], tt[1])) + self._exclude_types.add((tt[1], tt[0])) + # ntypes + 1 for nlist masks + self.type_mask = np.array( + [ + [ + 1 if (tt_i, tt_j) not in self._exclude_types else 0 + for tt_i in range(ntypes + 1) + ] + for tt_j in range(ntypes + 1) + ], + dtype=np.int32, + ) + # (ntypes+1 x ntypes+1) + self.type_mask = to_paddle_tensor(self.type_mask).reshape([-1]) + self.no_exclusion = len(self._exclude_types) == 0 + + def get_exclude_types(self): + return self._exclude_types + + # may have a better place for this method... + def forward( + self, + nlist: paddle.Tensor, + atype_ext: paddle.Tensor, + ) -> paddle.Tensor: + """Compute type exclusion mask. + + Parameters + ---------- + nlist + The neighbor list. shape: nf x nloc x nnei + atype_ext + The extended aotm types. shape: nf x nall + + Returns + ------- + mask + The type exclusion mask of shape: nf x nloc x nnei. + Element [ff,ii,jj] being 0 if type(ii), type(nlist[ff,ii,jj]) is excluded, + otherwise being 1. + + """ + if self.no_exclusion: + # safely return 1 if nothing is excluded. + return paddle.ones_like(nlist, dtype=paddle.int32).to(device=nlist.place) + nf, nloc, nnei = nlist.shape + nall = atype_ext.shape[1] + # add virtual atom of type ntypes. 
nf x nall+1 + ae = paddle.concat( + [ + atype_ext, + self.ntypes + * paddle.ones([nf, 1], dtype=atype_ext.dtype).to( + device=atype_ext.place + ), + ], + axis=-1, + ) + type_i = atype_ext[:, :nloc].reshape([nf, nloc]) * (self.ntypes + 1) + # nf x nloc x nnei + index = paddle.where(nlist == -1, nall, nlist).reshape([nf, nloc * nnei]) + # type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( + # [nf, nloc, nnei] + # ) + type_j = decomp.take_along_axis(ae, axis=1, indices=index).reshape( + [nf, nloc, nnei] + ) + type_ij = type_i[:, :, None] + type_j + # nf x (nloc x nnei) + type_ij = type_ij.reshape([nf, nloc * nnei]) + mask = ( + self.type_mask[type_ij] + .reshape([nf, nloc, nnei]) + .to(atype_ext.place) + .astype("bool") + ) + return mask diff --git a/deepmd/pd/utils/finetune.py b/deepmd/pd/utils/finetune.py new file mode 100644 index 0000000000..edac72d9c9 --- /dev/null +++ b/deepmd/pd/utils/finetune.py @@ -0,0 +1,200 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from copy import ( + deepcopy, +) + +import paddle + +from deepmd.utils.finetune import ( + FinetuneRuleItem, +) + +log = logging.getLogger(__name__) + + +def get_finetune_rule_single( + _single_param_target, + _model_param_pretrained, + from_multitask=False, + model_branch="Default", + model_branch_from="", + change_model_params=False, +): + single_config = deepcopy(_single_param_target) + new_fitting = False + model_branch_chosen = "Default" + + if not from_multitask: + single_config_chosen = deepcopy(_model_param_pretrained) + if model_branch_from == "RANDOM": + # not ["", "RANDOM"], because single-from-single finetune uses pretrained fitting in default + new_fitting = True + else: + model_dict_params = _model_param_pretrained["model_dict"] + if model_branch_from in ["", "RANDOM"]: + model_branch_chosen = next(iter(model_dict_params.keys())) + new_fitting = True + log.warning( + "The fitting net will be re-init instead of using that in the pretrained model! 
" + "The bias_adjust_mode will be set-by-statistic!" + ) + else: + model_branch_chosen = model_branch_from + assert model_branch_chosen in model_dict_params, ( + f"No model branch named '{model_branch_chosen}'! " + f"Available ones are {list(model_dict_params.keys())}." + ) + single_config_chosen = deepcopy(model_dict_params[model_branch_chosen]) + old_type_map, new_type_map = ( + single_config_chosen["type_map"], + single_config["type_map"], + ) + finetune_rule = FinetuneRuleItem( + p_type_map=old_type_map, + type_map=new_type_map, + model_branch=model_branch_chosen, + random_fitting=new_fitting, + ) + if change_model_params: + trainable_param = { + "descriptor": single_config.get("descriptor", {}).get("trainable", True), + "fitting_net": single_config.get("fitting_net", {}).get("trainable", True), + } + single_config["descriptor"] = single_config_chosen["descriptor"] + if not new_fitting: + single_config["fitting_net"] = single_config_chosen["fitting_net"] + log.info( + f"Change the '{model_branch}' model configurations according to the model branch " + f"'{model_branch_chosen}' in the pretrained one..." + ) + for net_type in trainable_param: + if net_type in single_config: + single_config[net_type]["trainable"] = trainable_param[net_type] + else: + single_config[net_type] = {"trainable": trainable_param[net_type]} + return single_config, finetune_rule + + +def get_finetune_rules( + finetune_model, model_config, model_branch="", change_model_params=True +): + """ + Get fine-tuning rules and (optionally) change the model_params according to the pretrained one. + + This function gets the fine-tuning rules and (optionally) changes input in different modes as follows: + 1. Single-task fine-tuning from a single-task pretrained model: + - The model will be fine-tuned based on the pretrained model. + - (Optional) Updates the model parameters based on the pretrained model. + 2. 
Single-task fine-tuning from a multi-task pretrained model: + - The model will be fine-tuned based on the selected branch in the pretrained model. + The chosen branch can be defined from the command-line or `finetune_head` input parameter. + If not defined, model parameters in the fitting network will be randomly initialized. + - (Optional) Updates the model parameters based on the selected branch in the pretrained model. + 3. Multi-task fine-tuning from a single-task pretrained model: + - The model in each branch will be fine-tuned or resumed based on the single branch ('Default') in the pretrained model. + The chosen branches can be defined from the `finetune_head` input parameter of each branch. + - If `finetune_head` is defined as 'Default', + it will be fine-tuned based on the single branch ('Default') in the pretrained model. + - If `finetune_head` is not defined and the model_key is 'Default', + it will resume from the single branch ('Default') in the pretrained model without fine-tuning. + - If `finetune_head` is not defined and the model_key is not 'Default', + it will be fine-tuned based on the single branch ('Default') in the pretrained model, + while model parameters in the fitting network of the branch will be randomly initialized. + - (Optional) Updates model parameters in each branch based on the single branch ('Default') in the pretrained model. + 4. Multi-task fine-tuning from a multi-task pretrained model: + - The model in each branch will be fine-tuned or resumed based on the chosen branches in the pretrained model. + The chosen branches can be defined from the `finetune_head` input parameter of each branch. + - If `finetune_head` is defined as one of the branches in the pretrained model, + it will be fine-tuned based on the chosen branch in the pretrained model. 
+ - If `finetune_head` is not defined and the model_key is the same as one of those in the pretrained model, + it will resume from the model_key branch in the pretrained model without fine-tuning. + - If `finetune_head` is not defined and a new model_key is used, + it will be fine-tuned based on the chosen branch in the pretrained model, + while model parameters in the fitting network of the branch will be randomly initialized. + - (Optional) Updates model parameters in each branch based on the chosen branches in the pretrained model. + + Parameters + ---------- + finetune_model + The pretrained model. + model_config + The fine-tuning input parameters. + model_branch + The model branch chosen in command-line mode, only for single-task fine-tuning. + change_model_params + Whether to change the model parameters according to the pretrained one. + + Returns + ------- + model_config: + Updated model parameters. + finetune_links: + Fine-tuning rules in a dict format, with `model_branch`: FinetuneRuleItem pairs. + """ + multi_task = "model_dict" in model_config + state_dict = paddle.load(finetune_model) + if "model" in state_dict: + state_dict = state_dict["model"] + last_model_params = state_dict["_extra_state"]["model_params"] + finetune_from_multi_task = "model_dict" in last_model_params + finetune_links = {} + if not multi_task: + # use command-line first + if model_branch == "" and "finetune_head" in model_config: + model_branch = model_config["finetune_head"] + model_config, finetune_rule = get_finetune_rule_single( + model_config, + last_model_params, + from_multitask=finetune_from_multi_task, + model_branch="Default", + model_branch_from=model_branch, + change_model_params=change_model_params, + ) + finetune_links["Default"] = finetune_rule + else: + assert model_branch == "", ( + "Multi-task fine-tuning does not support command-line branches chosen!" + "Please define the 'finetune_head' in each model params!" 
+ ) + target_keys = model_config["model_dict"].keys() + if not finetune_from_multi_task: + pretrained_keys = ["Default"] + else: + pretrained_keys = last_model_params["model_dict"].keys() + for model_key in target_keys: + resuming = False + if ( + "finetune_head" in model_config["model_dict"][model_key] + and model_config["model_dict"][model_key]["finetune_head"] != "RANDOM" + ): + pretrained_key = model_config["model_dict"][model_key]["finetune_head"] + assert pretrained_key in pretrained_keys, ( + f"'{pretrained_key}' head chosen to finetune not exist in the pretrained model!" + f"Available heads are: {list(pretrained_keys)}" + ) + model_branch_from = pretrained_key + elif ( + "finetune_head" not in model_config["model_dict"][model_key] + and model_key in pretrained_keys + ): + # not do anything if not defined "finetune_head" in heads that exist in the pretrained model + # this will just do resuming + model_branch_from = model_key + resuming = True + else: + # if not defined "finetune_head" in new heads or "finetune_head" is "RANDOM", the fitting net will bre randomly initialized + model_branch_from = "RANDOM" + model_config["model_dict"][model_key], finetune_rule = ( + get_finetune_rule_single( + model_config["model_dict"][model_key], + last_model_params, + from_multitask=finetune_from_multi_task, + model_branch=model_key, + model_branch_from=model_branch_from, + change_model_params=change_model_params, + ) + ) + finetune_links[model_key] = finetune_rule + finetune_links[model_key].resuming = resuming + return model_config, finetune_links diff --git a/deepmd/pd/utils/learning_rate.py b/deepmd/pd/utils/learning_rate.py new file mode 100644 index 0000000000..94c657abd4 --- /dev/null +++ b/deepmd/pd/utils/learning_rate.py @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import numpy as np + + +class LearningRateExp: + def __init__( + self, + start_lr, + stop_lr, + decay_steps, + stop_steps, + decay_rate=None, + **kwargs, + ): + """ + Construct an 
exponential-decayed learning rate. + + Parameters + ---------- + start_lr + The learning rate at the start of the training. + stop_lr + The desired learning rate at the end of the training. + When decay_rate is explicitly set, this value will serve as + the minimum learning rate during training. In other words, + if the learning rate decays below stop_lr, stop_lr will be applied instead. + decay_steps + The learning rate is decaying every this number of training steps. + stop_steps + The total training steps for learning rate scheduler. + decay_rate + The decay rate for the learning rate. + If provided, the decay rate will be set instead of + calculating it through interpolation between start_lr and stop_lr. + """ + self.start_lr = start_lr + default_ds = 100 if stop_steps // 10 > 100 else stop_steps // 100 + 1 + self.decay_steps = decay_steps + if self.decay_steps >= stop_steps: + self.decay_steps = default_ds + self.decay_rate = np.exp( + np.log(stop_lr / self.start_lr) / (stop_steps / self.decay_steps) + ) + if decay_rate is not None: + self.decay_rate = decay_rate + self.min_lr = stop_lr + + def value(self, step): + """Get the learning rate at the given step.""" + step_lr = self.start_lr * np.power(self.decay_rate, step // self.decay_steps) + if step_lr < self.min_lr: + step_lr = self.min_lr + return step_lr diff --git a/deepmd/pd/utils/multi_task.py b/deepmd/pd/utils/multi_task.py new file mode 100644 index 0000000000..680dc53c79 --- /dev/null +++ b/deepmd/pd/utils/multi_task.py @@ -0,0 +1,162 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from copy import ( + deepcopy, +) + +from deepmd.pd.model.descriptor import ( + BaseDescriptor, +) +from deepmd.pd.model.task import ( + BaseFitting, +) + + +def preprocess_shared_params(model_config): + """Preprocess the model params for multitask model, and generate the links dict for further sharing. + + Args: + model_config: Model params of multitask model. 
+
+    Returns
+    -------
+    model_config: Preprocessed model params of multitask model.
+        Those string names are replaced with real params in `shared_dict` of model params.
+    shared_links: Dict of link infos for further sharing.
+        Each item, whose key must be in `shared_dict`, is a dict with following keys:
+        - "type": The real class type of this item.
+        - "links": List of shared settings, each sub-item is a dict with following keys:
+            - "model_key": Model key in the `model_dict` to share this item.
+            - "shared_type": Type of this shared item.
+            - "shared_level": Shared level (int) of this item in this model.
+                Lower for more params to share, 0 means to share all params in this item.
+            This list is sorted by "shared_level".
+        For example, if one has `model_config` like this:
+        "model": {
+            "shared_dict": {
+                "my_type_map": ["foo", "bar"],
+                "my_des1": {
+                    "type": "se_e2_a",
+                    "neuron": [10, 20, 40]
+                },
+            },
+            "model_dict": {
+                "model_1": {
+                    "type_map": "my_type_map",
+                    "descriptor": "my_des1",
+                    "fitting_net": {
+                        "neuron": [100, 100, 100]
+                    }
+                },
+                "model_2": {
+                    "type_map": "my_type_map",
+                    "descriptor": "my_des1",
+                    "fitting_net": {
+                        "neuron": [100, 100, 100]
+                    }
+                },
+                "model_3": {
+                    "type_map": "my_type_map",
+                    "descriptor": "my_des1:1",
+                    "fitting_net": {
+                        "neuron": [100, 100, 100]
+                    }
+                }
+            }
+        }
+        The above config will init three model branches named `model_1` and `model_2` and `model_3`,
+        in which:
+        - `model_2` and `model_3` will have the same `type_map` as that in `model_1`.
+        - `model_2` will share all the parameters of `descriptor` with `model_1`,
+            while `model_3` will share part of parameters of `descriptor` with `model_1`
+            on human-defined share-level `1` (default is `0`, meaning share all the parameters).
+        - `model_1`, `model_2` and `model_3` have three different `fitting_net`s.
+ The returned `model_config` will automatically fulfill the input `model_config` as if there's no sharing, + and the `shared_links` will keep all the sharing information with looking: + { + 'my_des1': { + 'type': 'DescrptSeA', + 'links': [ + {'model_key': 'model_1', + 'shared_type': 'descriptor', + 'shared_level': 0}, + {'model_key': 'model_2', + 'shared_type': 'descriptor', + 'shared_level': 0}, + {'model_key': 'model_3', + 'shared_type': 'descriptor', + 'shared_level': 1} + ] + } + } + + """ + assert "model_dict" in model_config, "only multi-task model can use this method!" + supported_types = ["type_map", "descriptor", "fitting_net"] + shared_dict = model_config.get("shared_dict", {}) + shared_links = {} + type_map_keys = [] + + def replace_one_item(params_dict, key_type, key_in_dict, suffix="", index=None): + shared_type = key_type + shared_key = key_in_dict + shared_level = 0 + if ":" in key_in_dict: + shared_key = key_in_dict.split(":")[0] + shared_level = int(key_in_dict.split(":")[1]) + assert ( + shared_key in shared_dict + ), f"Appointed {shared_type} {shared_key} are not in the shared_dict! Please check the input params." 
+ if index is None: + params_dict[shared_type] = deepcopy(shared_dict[shared_key]) + else: + params_dict[index] = deepcopy(shared_dict[shared_key]) + if shared_type == "type_map": + if key_in_dict not in type_map_keys: + type_map_keys.append(key_in_dict) + else: + if shared_key not in shared_links: + class_name = get_class_name(shared_type, shared_dict[shared_key]) + shared_links[shared_key] = {"type": class_name, "links": []} + link_item = { + "model_key": model_key, + "shared_type": shared_type + suffix, + "shared_level": shared_level, + } + shared_links[shared_key]["links"].append(link_item) + + for model_key in model_config["model_dict"]: + model_params_item = model_config["model_dict"][model_key] + for item_key in model_params_item: + if item_key in supported_types: + item_params = model_params_item[item_key] + if isinstance(item_params, str): + replace_one_item(model_params_item, item_key, item_params) + elif item_params.get("type", "") == "hybrid": + for ii, hybrid_item in enumerate(item_params["list"]): + if isinstance(hybrid_item, str): + replace_one_item( + model_params_item[item_key]["list"], + item_key, + hybrid_item, + suffix=f"_hybrid_{ii}", + index=ii, + ) + for shared_key in shared_links: + shared_links[shared_key]["links"] = sorted( + shared_links[shared_key]["links"], + key=lambda x: x["shared_level"] + - ("spin" in model_config["model_dict"][x["model_key"]]) * 100, + ) + # little trick to make spin models in the front to be the base models, + # because its type embeddings are more general. + assert len(type_map_keys) == 1, "Multitask model must have only one type_map!" 
+ return model_config, shared_links + + +def get_class_name(item_key, item_params): + if item_key == "descriptor": + return BaseDescriptor.get_class_by_type(item_params.get("type", "se_e2_a")) + elif item_key == "fitting_net": + return BaseFitting.get_class_by_type(item_params.get("type", "ener")) + else: + raise RuntimeError(f"Unknown class_name type {item_key}") diff --git a/deepmd/pd/utils/neighbor_stat.py b/deepmd/pd/utils/neighbor_stat.py new file mode 100644 index 0000000000..a1e60459ca --- /dev/null +++ b/deepmd/pd/utils/neighbor_stat.py @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from collections.abc import ( + Iterator, +) +from typing import ( + Optional, +) + +import numpy as np +import paddle + +from deepmd.pd.utils.auto_batch_size import ( + AutoBatchSize, +) +from deepmd.pd.utils.env import ( + DEVICE, +) +from deepmd.pd.utils.nlist import ( + extend_coord_with_ghosts, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.neighbor_stat import NeighborStat as BaseNeighborStat + + +class NeighborStatOP(paddle.nn.Layer): + """Class for getting neighbor statics data information. + + Parameters + ---------- + ntypes + The num of atom types + rcut + The cut-off radius + mixed_types : bool, optional + If True, treat neighbors of all types as a single type. + """ + + def __init__( + self, + ntypes: int, + rcut: float, + mixed_types: bool, + ) -> None: + super().__init__() + self.rcut = rcut + self.ntypes = ntypes + self.mixed_types = mixed_types + + def forward( + self, + coord: paddle.Tensor, + atype: paddle.Tensor, + cell: Optional[paddle.Tensor], + ) -> tuple[paddle.Tensor, paddle.Tensor]: + """Calculate the neareest neighbor distance between atoms, maximum nbor size of + atoms and the output data range of the environment matrix. + + Parameters + ---------- + coord + The coordinates of atoms. + atype + The atom types. + cell + The cell. 
+ + Returns + ------- + paddle.Tensor + The minimal squared distance between two atoms, in the shape of (nframes,) + paddle.Tensor + The maximal number of neighbors + """ + nframes = coord.shape[0] + coord = coord.reshape([nframes, -1, 3]) + nloc = coord.shape[1] + coord = coord.reshape([nframes, nloc * 3]) + extend_coord, extend_atype, _ = extend_coord_with_ghosts( + coord, atype, cell, self.rcut + ) + + coord1 = extend_coord.reshape([nframes, -1]) + nall = coord1.shape[1] // 3 + coord0 = coord1[:, : nloc * 3] + diff: paddle.Tensor = coord1.reshape([nframes, -1, 3]).unsqueeze( + 1 + ) - coord0.reshape([nframes, -1, 3]).unsqueeze(2) + assert list(diff.shape) == [nframes, nloc, nall, 3] + # remove the diagonal elements + mask = paddle.eye(nloc, nall).to(dtype=paddle.bool, device=diff.place) + # diff[:, mask] = float("inf") + # diff.masked_fill_( + # paddle.broadcast_to(mask.unsqueeze([0, -1]), diff.shape), + # paddle.to_tensor(float("inf")), + # ) + diff[paddle.broadcast_to(mask.unsqueeze([0, -1]), diff.shape)] = float("inf") + rr2 = paddle.sum(paddle.square(diff), axis=-1) + min_rr2 = paddle.min(rr2, axis=-1) + # count the number of neighbors + if not self.mixed_types: + mask = rr2 < self.rcut**2 + nnei = paddle.zeros((nframes, nloc, self.ntypes), dtype=paddle.int64) + for ii in range(self.ntypes): + nnei[:, :, ii] = paddle.sum( + mask & ((extend_atype == ii)[:, None, :]), axis=-1 + ) + else: + mask = rr2 < self.rcut**2 + # virtual types (<0) are not counted + nnei = paddle.sum( + mask & ((extend_atype >= 0).unsqueeze(1)), axis=-1 + ).reshape([nframes, nloc, 1]) + max_nnei = paddle.max(nnei, axis=1) + return min_rr2, max_nnei + + +class NeighborStat(BaseNeighborStat): + """Neighbor statistics using pure NumPy. + + Parameters + ---------- + ntypes : int + The num of atom types + rcut : float + The cut-off radius + mixed_type : bool, optional, default=False + Treat all types as a single type. 
+ """ + + def __init__( + self, + ntypes: int, + rcut: float, + mixed_type: bool = False, + ) -> None: + super().__init__(ntypes, rcut, mixed_type) + op = NeighborStatOP(ntypes, rcut, mixed_type) + # self.op = paddle.jit.to_static(op) + self.op = op + self.auto_batch_size = AutoBatchSize() + + def iterator( + self, data: DeepmdDataSystem + ) -> Iterator[tuple[np.ndarray, float, str]]: + """Abstract method for producing data. + + Yields + ------ + np.ndarray + The maximal number of neighbors + float + The squared minimal distance between two atoms + str + The directory of the data system + """ + for ii in range(len(data.system_dirs)): + for jj in data.data_systems[ii].dirs: + data_set = data.data_systems[ii] + data_set_data = data_set._load_set(jj) + minrr2, max_nnei = self.auto_batch_size.execute_all( + self._execute, + data_set_data["coord"].shape[0], + data_set.get_natoms(), + data_set_data["coord"], + data_set_data["type"], + data_set_data["box"] if data_set.pbc else None, + ) + yield np.max(max_nnei, axis=0), np.min(minrr2), jj + + def _execute( + self, + coord: np.ndarray, + atype: np.ndarray, + cell: Optional[np.ndarray], + ): + """Execute the operation. + + Parameters + ---------- + coord + The coordinates of atoms. + atype + The atom types. + cell + The cell. 
+ """ + with paddle.no_grad(): + minrr2, max_nnei = self.op( + paddle.to_tensor(coord, place=DEVICE), + paddle.to_tensor(atype, place=DEVICE), + paddle.to_tensor(cell, place=DEVICE) if cell is not None else None, + ) + minrr2 = minrr2.numpy() + max_nnei = max_nnei.numpy() + return minrr2, max_nnei diff --git a/deepmd/pd/utils/nlist.py b/deepmd/pd/utils/nlist.py new file mode 100644 index 0000000000..851ff5293d --- /dev/null +++ b/deepmd/pd/utils/nlist.py @@ -0,0 +1,534 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import paddle + +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.region import ( + normalize_coord, + to_face_distance, +) + + +def extend_input_and_build_neighbor_list( + coord, + atype, + rcut: float, + sel: list[int], + mixed_types: bool = False, + box: Optional[paddle.Tensor] = None, +): + nframes, nloc = atype.shape[:2] + if box is not None: + box_gpu = box.to(coord.place) + coord_normalized = normalize_coord( + coord.reshape([nframes, nloc, 3]), + box_gpu.reshape([nframes, 3, 3]), + ) + else: + box_gpu = None + coord_normalized = coord.clone() + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box_gpu, rcut, box + ) + nlist = build_neighbor_list( + extended_coord, + extended_atype, + nloc, + rcut, + sel, + distinguish_types=(not mixed_types), + ) + extended_coord = extended_coord.reshape([nframes, -1, 3]) + return extended_coord, extended_atype, mapping, nlist + + +def build_neighbor_list( + coord: paddle.Tensor, + atype: paddle.Tensor, + nloc: int, + rcut: float, + sel: Union[int, list[int]], + distinguish_types: bool = True, +) -> paddle.Tensor: + """Build neightbor list for a single frame. keeps nsel neighbors. 
+ + Parameters + ---------- + coord : paddle.Tensor + exptended coordinates of shape [batch_size, nall x 3] + atype : paddle.Tensor + extended atomic types of shape [batch_size, nall] + if type < 0 the atom is treat as virtual atoms. + nloc : int + number of local atoms. + rcut : float + cut-off radius + sel : int or List[int] + maximal number of neighbors (of each type). + if distinguish_types==True, nsel should be list and + the length of nsel should be equal to number of + types. + distinguish_types : bool + distinguish different types. + + Returns + ------- + neighbor_list : paddle.Tensor + Neighbor list of shape [batch_size, nloc, nsel], the neighbors + are stored in an ascending order. If the number of + neighbors is less than nsel, the positions are masked + with -1. The neighbor list of an atom looks like + |------ nsel ------| + xx xx xx xx -1 -1 -1 + if distinguish_types==True and we have two types + |---- nsel[0] -----| |---- nsel[1] -----| + xx xx xx xx -1 -1 -1 xx xx xx -1 -1 -1 -1 + For virtual atoms all neighboring positions are filled with -1. + + """ + batch_size = coord.shape[0] + coord = coord.reshape([batch_size, -1]) + nall = coord.shape[1] // 3 + # fill virtual atoms with large coords so they are not neighbors of any + # real atom. 
+ if coord.numel() > 0: + xmax = paddle.max(coord) + 2.0 * rcut + else: + xmax = paddle.zeros([], dtype=coord.dtype).to(device=coord.place) + 2.0 * rcut + # nf x nall + is_vir = atype < 0 + coord1 = paddle.where( + is_vir[:, :, None], xmax, coord.reshape([batch_size, nall, 3]) + ).reshape([batch_size, nall * 3]) + if isinstance(sel, int): + sel = [sel] + # nloc x 3 + coord0 = coord1[:, : nloc * 3] + # nloc x nall x 3 + diff = coord1.reshape([batch_size, -1, 3]).unsqueeze(1) - coord0.reshape( + [batch_size, -1, 3] + ).unsqueeze(2) + if paddle.in_dynamic_mode(): + assert list(diff.shape) == [batch_size, nloc, nall, 3] + # nloc x nall + # rr = paddle.linalg.norm(diff, axis=-1) + rr = decomp.norm(diff, axis=-1) + # if central atom has two zero distances, sorting sometimes can not exclude itself + rr = rr - paddle.eye(nloc, nall, dtype=rr.dtype).to(device=rr.place).unsqueeze(0) + rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1) + # nloc x (nall-1) + rr = rr[:, :, 1:] + nlist = nlist[:, :, 1:] + t = _trim_mask_distinguish_nlist( + is_vir, atype, rr, nlist, rcut, sel, distinguish_types + ) + return t + + +def _trim_mask_distinguish_nlist( + is_vir_cntl: paddle.Tensor, + atype_neig: paddle.Tensor, + rr: paddle.Tensor, + nlist: paddle.Tensor, + rcut: float, + sel: list[int], + distinguish_types: bool, +) -> paddle.Tensor: + """Trim the size of nlist, mask if any central atom is virtual, distinguish types if necessary.""" + nsel = sum(sel) + # nloc x nsel + batch_size, nloc, nnei = rr.shape + if paddle.in_dynamic_mode(): + assert batch_size == is_vir_cntl.shape[0] + if nsel <= nnei: + rr = rr[:, :, :nsel] + nlist = nlist[:, :, :nsel] + else: + rr = paddle.concat( + [ + rr, + paddle.ones([batch_size, nloc, nsel - nnei]).to( + device=rr.place, dtype=rr.dtype + ) + + rcut, + ], # pylint: disable=no-explicit-dtype + axis=-1, + ) + nlist = paddle.concat( + [ + nlist, + paddle.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype).to( + device=rr.place + ), + ], 
+ axis=-1, + ) + if paddle.in_dynamic_mode(): + assert list(nlist.shape) == [batch_size, nloc, nsel] + nlist = paddle.where( + paddle.logical_or((rr > rcut), is_vir_cntl[:, :nloc, None]), -1, nlist + ) + if distinguish_types: + return nlist_distinguish_types(nlist, atype_neig, sel) + else: + return nlist + + +def build_directional_neighbor_list( + coord_cntl: paddle.Tensor, + atype_cntl: paddle.Tensor, + coord_neig: paddle.Tensor, + atype_neig: paddle.Tensor, + rcut: float, + sel: Union[int, list[int]], + distinguish_types: bool = True, +) -> paddle.Tensor: + """Build directional neighbor list. + + With each central atom, all the neighbor atoms in the cut-off radius will + be recorded in the neighbor list. The maximum neighbors is nsel. If the real + number of neighbors is larger than nsel, the neighbors will be sorted with the + distance and the first nsel neighbors are kept. + + Important: the central and neighboring atoms are assume to be different atoms. + + Parameters + ---------- + coord_central : paddle.Tensor + coordinates of central atoms. assumed to be local atoms. + shape [batch_size, nloc_central x 3] + atype_central : paddle.Tensor + atomic types of central atoms. shape [batch_size, nloc_central] + if type < 0 the atom is treated as virtual atoms. + coord_neighbor : paddle.Tensor + extended coordinates of neighbors atoms. shape [batch_size, nall_neighbor x 3] + atype_central : paddle.Tensor + extended atomic types of neighbors atoms. shape [batch_size, nall_neighbor] + if type < 0 the atom is treated as virtual atoms. + rcut : float + cut-off radius + sel : int or List[int] + maximal number of neighbors (of each type). + if distinguish_types==True, nsel should be list and + the length of nsel should be equal to number of + types. + distinguish_types : bool + distinguish different types. + + Returns + ------- + neighbor_list : paddle.Tensor + Neighbor list of shape [batch_size, nloc_central, nsel], the neighbors + are stored in an ascending order. 
If the number of neighbors is less than nsel,
+        the positions are masked with -1. The neighbor list of an atom looks like
+        |------ nsel ------|
+        xx xx xx xx -1 -1 -1
+        if distinguish_types==True and we have two types
+        |---- nsel[0] -----| |---- nsel[1] -----|
+        xx xx xx xx -1 -1 -1 xx xx xx -1 -1 -1 -1
+        For virtual atoms all neighboring positions are filled with -1.
+    """
+    batch_size = coord_cntl.shape[0]
+    coord_cntl = coord_cntl.reshape([batch_size, -1])
+    nloc_cntl = coord_cntl.shape[1] // 3
+    coord_neig = coord_neig.reshape([batch_size, -1])
+    nall_neig = coord_neig.shape[1] // 3
+    # fill virtual atoms with large coords so they are not neighbors of any
+    # real atom.
+    if coord_neig.numel() > 0:
+        xmax = paddle.max(coord_cntl) + 2.0 * rcut
+    else:
+        xmax = (
+            paddle.zeros([1], dtype=coord_neig.dtype, device=coord_neig.place)
+            + 2.0 * rcut
+        )
+    # nf x nloc
+    is_vir_cntl = atype_cntl < 0
+    # nf x nall
+    is_vir_neig = atype_neig < 0
+    # nf x nloc x 3
+    coord_cntl = coord_cntl.reshape([batch_size, nloc_cntl, 3])
+    # nf x nall x 3
+    coord_neig = paddle.where(
+        is_vir_neig[:, :, None], xmax, coord_neig.reshape([batch_size, nall_neig, 3])
+    ).reshape([batch_size, nall_neig, 3])
+    # nsel
+    if isinstance(sel, int):
+        sel = [sel]
+    # nloc x nall x 3
+    diff = coord_neig[:, None, :, :] - coord_cntl[:, :, None, :]
+    if paddle.in_dynamic_mode():
+        assert list(diff.shape) == [batch_size, nloc_cntl, nall_neig, 3]
+    # nloc x nall
+    # rr = paddle.linalg.norm(diff, axis=-1)
+    rr = decomp.norm(diff, axis=-1)
+    rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1)
+
+    # We assume that the central and neighbor atoms are different,
+    # thus we do not need to exclude self-neighbors.
+ # # if central atom has two zero distances, sorting sometimes can not exclude itself + # rr -= paddle.eye(nloc_cntl, nall_neig, dtype=rr.dtype, device=rr.place).unsqueeze(0) + # rr, nlist = paddle.sort(rr, axis=-1) + # # nloc x (nall-1) + # rr = rr[:, :, 1:] + # nlist = nlist[:, :, 1:] + + return _trim_mask_distinguish_nlist( + is_vir_cntl, atype_neig, rr, nlist, rcut, sel, distinguish_types + ) + + +def nlist_distinguish_types( + nlist: paddle.Tensor, + atype: paddle.Tensor, + sel: list[int], +): + """Given a nlist that does not distinguish atom types, return a nlist that + distinguish atom types. + + """ + nf, nloc, nnei = nlist.shape + ret_nlist = [] + # nloc x nall + tmp_atype = paddle.tile(atype.unsqueeze(1), [1, nloc, 1]) + mask = nlist == -1 + # nloc x s(nsel) + # tnlist = paddle.take_along_axis( + # tmp_atype, + # axis=2, + # indices=nlist.masked_fill(mask, 0), + # ) + tnlist = decomp.take_along_axis( + tmp_atype, + axis=2, + indices=nlist.masked_fill(mask, 0), + ) + tnlist = tnlist.masked_fill(mask, -1) + snsel = tnlist.shape[2] + for ii, ss in enumerate(sel): + # nloc x s(nsel) + # to int because bool cannot be sort on GPU + pick_mask = (tnlist == ii).to(paddle.int64) + # nloc x s(nsel), stable sort, nearer neighbors first + pick_mask, imap = ( + paddle.sort(pick_mask, axis=-1, descending=True, stable=True), + paddle.argsort(pick_mask, axis=-1, descending=True, stable=True), + ) + # nloc x s(nsel) + # inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) + inlist = decomp.take_along_axis(nlist, axis=2, indices=imap) + inlist = inlist.masked_fill(~(pick_mask.to(paddle.bool)), -1) + # nloc x nsel[ii] + ret_nlist.append(paddle.split(inlist, [ss, snsel - ss], axis=-1)[0]) + return paddle.concat(ret_nlist, axis=-1) + + +# build_neighbor_list = paddle.vmap( +# build_neighbor_list_lower, +# in_dims=(0,0,None,None,None), +# out_dims=(0), +# ) + + +def get_multiple_nlist_key( + rcut: float, + nsel: int, +) -> str: + return str(rcut) + "_" + str(nsel) + + 
+def build_multiple_neighbor_list( + coord: paddle.Tensor, + nlist: paddle.Tensor, + rcuts: list[float], + nsels: list[int], +) -> dict[str, paddle.Tensor]: + """Input one neighbor list, and produce multiple neighbor lists with + different cutoff radius and numbers of selection out of it. The + required rcuts and nsels should be smaller or equal to the input nlist. + + Parameters + ---------- + coord : paddle.Tensor + exptended coordinates of shape [batch_size, nall x 3] + nlist : paddle.Tensor + Neighbor list of shape [batch_size, nloc, nsel], the neighbors + should be stored in an ascending order. + rcuts : List[float] + list of cut-off radius in ascending order. + nsels : List[int] + maximal number of neighbors in ascending order. + + Returns + ------- + nlist_dict : Dict[str, paddle.Tensor] + A dict of nlists, key given by get_multiple_nlist_key(rc, nsel) + value being the corresponding nlist. + + """ + if paddle.in_dynamic_mode(): + assert len(rcuts) == len(nsels) + if len(rcuts) == 0: + return {} + nb, nloc, nsel = nlist.shape + if nsel < nsels[-1]: + pad = -paddle.ones( + [nb, nloc, nsels[-1] - nsel], + dtype=nlist.dtype, + ).to(device=nlist.place) + # nb x nloc x nsel + nlist = paddle.concat([nlist, pad], axis=-1) + if paddle.is_tensor(nsel): + nsel = paddle.to_tensor(nsels[-1], dtype=nsel.dtype) + else: + nsel = nsels[-1] + + # nb x nall x 3 + coord1 = coord.reshape([nb, -1, 3]) + nall = coord1.shape[1] + # nb x nloc x 3 + coord0 = coord1[:, :nloc, :] + nlist_mask = nlist == -1 + # nb x (nloc x nsel) x 3 + index = ( + nlist.masked_fill(nlist_mask, 0) + .reshape([nb, nloc * nsel]) + .unsqueeze(-1) + .expand([-1, -1, 3]) + ) + # nb x nloc x nsel x 3 + # coord2 = paddle.take_along_axis(coord1, axis=1, index=index).reshape( + # [nb, nloc, nsel, 3] + # ) + coord2 = decomp.take_along_axis(coord1, axis=1, indices=index).reshape( + [nb, nloc, nsel, 3] + ) + # nb x nloc x nsel x 3 + diff = coord2 - coord0[:, :, None, :] + # nb x nloc x nsel + # rr = 
paddle.linalg.norm(diff, axis=-1)
+    rr = decomp.norm(diff, axis=-1)
+    rr.masked_fill(nlist_mask, float("inf"))
+    nlist0 = nlist
+    ret = {}
+    for rc, ns in zip(rcuts[::-1], nsels[::-1]):
+        nlist0 = nlist0[:, :, :ns].masked_fill(rr[:, :, :ns] > rc, -1)
+        ret[get_multiple_nlist_key(rc, ns)] = nlist0
+    return ret
+
+
+def extend_coord_with_ghosts(
+    coord: paddle.Tensor,
+    atype: paddle.Tensor,
+    cell: Optional[paddle.Tensor],
+    rcut: float,
+    cell_cpu: Optional[paddle.Tensor] = None,
+) -> tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
+    """Extend the coordinates of the atoms by appending periodic images.
+    The number of images is large enough to ensure all the neighbors
+    within rcut are appended.
+
+    Parameters
+    ----------
+    coord : paddle.Tensor
+        original coordinates of shape [-1, nloc*3].
+    atype : paddle.Tensor
+        atom type of shape [-1, nloc].
+    cell : paddle.Tensor
+        simulation cell tensor of shape [-1, 9].
+    rcut : float
+        the cutoff radius
+    cell_cpu : paddle.Tensor
+        cell on cpu for performance
+
+    Returns
+    -------
+    extended_coord: paddle.Tensor
+        extended coordinates of shape [-1, nall*3].
+    extended_atype: paddle.Tensor
+        extended atom type of shape [-1, nall].
+ index_mapping: paddle.Tensor + maping extended index to the local index + + """ + device = coord.place + nf, nloc = atype.shape[:2] + aidx = paddle.tile(paddle.arange(nloc).to(device=device).unsqueeze(0), [nf, 1]) # pylint: disable=no-explicit-dtype + if cell is None: + nall = nloc + extend_coord = coord.clone() + extend_atype = atype.clone() + extend_aidx = aidx.clone() + else: + coord = coord.reshape([nf, nloc, 3]) + cell = cell.reshape([nf, 3, 3]) + cell_cpu = cell_cpu.reshape([nf, 3, 3]) if cell_cpu is not None else cell + # nf x 3 + to_face = to_face_distance(cell_cpu) + # nf x 3 + # *2: ghost copies on + and - directions + # +1: central cell + nbuff = paddle.ceil(rcut / to_face) + INT64_MIN = -9223372036854775808 + nbuff = paddle.where( + paddle.isinf(nbuff), + paddle.full_like(nbuff, INT64_MIN, dtype=paddle.int64), + nbuff.astype(paddle.int64), + ) + # 3 + nbuff = paddle.amax(nbuff, axis=0) # faster than paddle.max + nbuff_cpu = nbuff.cpu() + xi = ( + paddle.arange(-nbuff_cpu[0], nbuff_cpu[0] + 1, 1).to( + dtype=env.GLOBAL_PD_FLOAT_PRECISION + ) + # .cpu() + ) # pylint: disable=no-explicit-dtype + yi = ( + paddle.arange(-nbuff_cpu[1], nbuff_cpu[1] + 1, 1).to( + dtype=env.GLOBAL_PD_FLOAT_PRECISION + ) + # .cpu() + ) # pylint: disable=no-explicit-dtype + zi = ( + paddle.arange(-nbuff_cpu[2], nbuff_cpu[2] + 1, 1).to( + dtype=env.GLOBAL_PD_FLOAT_PRECISION + ) + # .cpu() + ) # pylint: disable=no-explicit-dtype + eye_3 = ( + paddle.eye(3, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + dtype=env.GLOBAL_PD_FLOAT_PRECISION + ) + # .cpu() + ) + xyz = xi.reshape([-1, 1, 1, 1]) * eye_3[0] + xyz = xyz + yi.reshape([1, -1, 1, 1]) * eye_3[1] + xyz = xyz + zi.reshape([1, 1, -1, 1]) * eye_3[2] + xyz = xyz.reshape([-1, 3]) + # xyz = xyz.to(device=device) + # ns x 3 + # shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))] + shift_idx = xyz[paddle.argsort(decomp.norm(xyz, axis=1))] + ns, _ = shift_idx.shape + nall = ns * nloc + # nf x ns x 3 + shift_vec = 
paddle.einsum("sd,fdk->fsk", shift_idx, cell)
+    # nf x ns x nloc x 3
+    extend_coord = coord[:, None, :, :] + shift_vec[:, :, None, :]
+    # nf x ns x nloc
+    extend_atype = paddle.tile(atype.unsqueeze(-2), [1, ns, 1])
+    # nf x ns x nloc
+    extend_aidx = paddle.tile(aidx.unsqueeze(-2), [1, ns, 1])
+    return (
+        extend_coord.reshape([nf, nall * 3]).to(device),
+        extend_atype.reshape([nf, nall]).to(device),
+        extend_aidx.reshape([nf, nall]).to(device),
+    )
diff --git a/deepmd/pd/utils/no_use_init.py b/deepmd/pd/utils/no_use_init.py
new file mode 100644
index 0000000000..9f363d6db0
--- /dev/null
+++ b/deepmd/pd/utils/no_use_init.py
@@ -0,0 +1,515 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+"""
+The initialization method under this module is aligned with pytorch initialization.
+If you need to use the initialization method of PaddlePaddle, please refer to
+[paddle.nn.initializer](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/nn/initializer).
+
+This code is based on [torch.nn.init](https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py)
+The copyright of pytorch/pytorch is a BSD-style license, as found in the LICENSE file.
+""" + +from __future__ import ( + annotations, +) + +import math +import warnings + +import numpy as np +import paddle +from paddle import ( + nn, +) +from typing_extensions import ( + Literal, +) + +__all__ = [ + "uniform_", + "normal_", + "trunc_normal_", + "glorot_normal_", + "constant_", + "ones_", + "zeros_", + "xavier_uniform_", + "xavier_normal_", + "kaiming_uniform_", + "kaiming_normal_", + "linear_init_", + "conv_init_", +] + + +def _no_grad_uniform_(tensor, a, b): + with paddle.no_grad(): + tensor.set_value( + paddle.uniform(shape=tensor.shape, dtype=tensor.dtype, min=a, max=b) + ) + return tensor + + +def _no_grad_normal_(tensor, mean=0.0, std=1.0): + with paddle.no_grad(): + tensor.set_value(paddle.normal(mean=mean, std=std, shape=tensor.shape)) + return tensor + + +def _no_grad_trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + f"mean({mean}) is more than 2 std({std}) from [a, b]([{a}, {b}]) in _no_grad_trunc_normal_. " + "The distribution of values may be incorrect." + ) + + with paddle.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. 
+ _tensor = paddle.uniform( + shape=tensor.shape, dtype=tensor.dtype, min=2 * l - 1, max=2 * u - 1 + ) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + _tensor.erfinv_() + + # Transform to proper mean, std + _tensor = paddle.multiply( + _tensor, paddle.to_tensor(std * math.sqrt(2.0), tensor.dtype) + ) + _tensor = paddle.add(_tensor, paddle.to_tensor(mean, tensor.dtype)) + + # Clamp to ensure it"s in the proper range + _tensor = paddle.clip(_tensor, min=a, max=b) + tensor.set_value(_tensor) + return tensor + + +def _no_grad_fill_(tensor, value=0.0): + with paddle.no_grad(): + tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype)) + return tensor + + +def uniform_(tensor: paddle.Tensor, a: float, b: float) -> paddle.Tensor: + """Modify tensor inplace using uniform_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + a (float): Min value. + b (float): Max value. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.uniform_(param, -1, 1) + """ + return _no_grad_uniform_(tensor, a, b) + + +def normal_( + tensor: paddle.Tensor, mean: float = 0.0, std: float = 1.0 +) -> paddle.Tensor: + """Modify tensor inplace using normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + mean (float, optional): Mean value. Defaults to 0.0. + std (float, optional): Std value. Defaults to 1.0. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.normal_(param, 0, 1) + """ + return _no_grad_normal_(tensor, mean, std) + + +def trunc_normal_( + tensor: paddle.Tensor, + mean: float = 0.0, + std: float = 1.0, + a: float = -2.0, + b: float = 2.0, +) -> paddle.Tensor: + """Modify tensor inplace using trunc_normal_. 
+ + Args: + tensor (paddle.Tensor): Paddle Tensor. + mean (float, optional): The mean of the normal distribution. Defaults to 0.0. + std (float, optional): The standard deviation of the normal distribution. Defaults to 1.0. + a (float, optional): The minimum cutoff value. Defaults to -2.0. + b (float, optional): The maximum cutoff value. Defaults to 2.0. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.trunc_normal_(param, 0.0, 1.0) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def constant_(tensor: paddle.Tensor, value: float = 0.0) -> paddle.Tensor: + """Modify tensor inplace using constant_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + value (float, optional): Value to fill tensor. Defaults to 0.0. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.constant_(param, 2) + """ + return _no_grad_fill_(tensor, value) + + +def ones_(tensor: paddle.Tensor) -> paddle.Tensor: + """Modify tensor inplace using ones_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.ones_(param) + """ + return _no_grad_fill_(tensor, 1) + + +def zeros_(tensor: paddle.Tensor) -> paddle.Tensor: + """Modify tensor inplace using zeros_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + + Returns + ------- + paddle.Tensor: Initialized tensor. 
+ + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.zeros_(param) + """ + return _no_grad_fill_(tensor, 0) + + +def _calculate_fan_in_and_fan_out(tensor, reverse=False): + """ + Calculate (fan_in, _fan_out) for tensor. + + Args: + tensor (paddle.Tensor): paddle.Tensor. + reverse (bool): Tensor data format order, False by default as [fout, fin, ...]. + e.g. : conv.weight [cout, cin, kh, kw] is False; linear.weight [cin, cout] + is True. + + Return: + Tuple[float, float]: (fan_in, fan_out). + """ + if tensor.ndim < 2: + raise ValueError( + f"tensor.ndim should be no less than 2, but got {tensor.ndim}." + ) + + if reverse: + num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1] + else: + num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0] + + receptive_field_size = 1 + if tensor.ndim > 2: + receptive_field_size = np.prod(tensor.shape[2:]) + + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +def xavier_uniform_( + tensor: paddle.Tensor, gain: float = 1.0, reverse: bool = False +) -> paddle.Tensor: + """Modify tensor inplace using xavier_uniform_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + gain (float, optional): Hyperparameter. Defaults to 1.0. + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Returns + ------- + paddle.Tensor: Initialized tensor. 
+ + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.xavier_uniform_(param) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + k = math.sqrt(3.0) * std + return _no_grad_uniform_(tensor, -k, k) + + +def xavier_normal_( + tensor: paddle.Tensor, gain: float = 1.0, reverse: bool = False +) -> paddle.Tensor: + """Modify tensor inplace using xavier_normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + gain (float, optional): Hyperparameter. Defaults to 1.0. + reverse (bool, optional): Tensor data format order, False by + default as [fout, fin, ...]. Defaults to False. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.xavier_normal_(param) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + return _no_grad_normal_(tensor, 0, std) + + +# reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html +def _calculate_correct_fan(tensor, mode, reverse=False): + mode = mode.lower() + valid_modes = ["fan_in", "fan_out"] + if mode not in valid_modes: + raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}") + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse) + + return fan_in if mode == "fan_in" else fan_out + + +def _calculate_gain(nonlinearity, param=None): + linear_fns = [ + "linear", + "conv1d", + "conv2d", + "conv3d", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + ] + if nonlinearity in linear_fns or nonlinearity == "sigmoid": + return 1 + elif nonlinearity == "tanh": + return 5.0 / 3 + elif nonlinearity == "relu": + return math.sqrt(2.0) + elif nonlinearity == 
"leaky_relu": + if param is None: + negative_slope = 0.01 + elif ( + not isinstance(param, bool) + and isinstance(param, int) + or isinstance(param, float) + ): + # True/False are instances of int, hence check above + negative_slope = param + else: + raise ValueError(f"negative_slope {param} not a valid number") + return math.sqrt(2.0 / (1 + negative_slope**2)) + elif nonlinearity == "selu": + return 3.0 / 4 + else: + raise ValueError(f"Unsupported nonlinearity {nonlinearity}") + + +def kaiming_uniform_( + tensor: paddle.Tensor, + a: float = 0, + mode: Literal["fan_in", "fan_out"] = "fan_in", + nonlinearity: str = "leaky_relu", + reverse: bool = False, +) -> paddle.Tensor: + """Modify tensor inplace using kaiming_uniform method. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + a (float, optional): The negative slope of the rectifier used after this layer. + Defaults to 0. + mode (Literal["fan_in", "fan_out"], optional): + ["fan_in", "fan_out"]. Defaults to "fan_in". + nonlinearity (str, optional): Nonlinearity method name. Defaults to "leaky_relu". + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.kaiming_uniform_(param) + """ + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = _calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + k = math.sqrt(3.0) * std + return _no_grad_uniform_(tensor, -k, k) + + +def kaiming_normal_( + tensor: paddle.Tensor, + a: float = 0, + mode: Literal["fan_in", "fan_out"] = "fan_in", + nonlinearity: str = "leaky_relu", + reverse: bool = False, +) -> paddle.Tensor: + """Modify tensor inplace using kaiming_normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + a (float, optional): The negative slope of the rectifier used after this layer. 
+ Defaults to 0. + mode (Literal["fan_in", "fan_out"], optional): Either + 'fan_in' (default) or 'fan_out'. Defaults to "fan_in". + nonlinearity (str, optional): Nonlinearity method name. Defaults to "leaky_relu". + reverse (bool, optional): Tensor data format order. Defaults to False. + + Returns + ------- + paddle.Tensor: Initialized tensor. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.kaiming_normal_(param) + """ + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = _calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + return _no_grad_normal_(tensor, 0, std) + + +def linear_init_(module: nn.Layer) -> None: + """Initialize module's weight and bias as it is a linear layer. + + Args: + module (nn.Layer): Linear Layer to be initialized. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> layer = paddle.nn.Linear(128, 256) + >>> ppsci.utils.initializer.linear_init_(layer) + """ + kaiming_uniform_(module.weight, a=math.sqrt(5)) + if module.bias is not None: + fan_in, _ = _calculate_fan_in_and_fan_out(module.weight, reverse=True) + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + uniform_(module.bias, -bound, bound) + + +def conv_init_(module: nn.Layer) -> None: + """Initialize module's weight and bias as it is a conv layer. + + Args: + module (nn.Layer): Convolution Layer to be initialized. + + Examples + -------- + >>> import paddle + >>> import ppsci + >>> layer = paddle.nn.Conv2D(4, 16, 2) + >>> ppsci.utils.initializer.conv_init_(layer) + """ + kaiming_uniform_(module.weight, a=math.sqrt(5)) + if module.bias is not None: + fan_in, _ = _calculate_fan_in_and_fan_out(module.weight, reverse=False) + if fan_in != 0: + bound = 1 / math.sqrt(fan_in) + uniform_(module.bias, -bound, bound) + + +def glorot_normal_(tensor: paddle.Tensor) -> paddle.Tensor: + """Modify tensor inplace using jax-style glorot_normal. 
def glorot_normal_(tensor: paddle.Tensor) -> paddle.Tensor:
    """Initialize ``tensor`` in-place with the jax-style glorot normal scheme.

    A truncated standard normal draw is rescaled so its std matches
    sqrt(2 / (fan_in + fan_out)).

    Args:
        tensor (paddle.Tensor): Paddle Tensor/Parameter; must be 2D.

    Returns
    -------
    paddle.Tensor: Initialized tensor.
    """
    assert (
        tensor.ndim == 2
    ), f"glorot_normal_ only support 2D tensor now, but got ndim={tensor.ndim}"
    fin, fout = tensor.shape
    var = 2.0 / (fin + fout)
    # 0.87962566103423978 is the std of a standard normal truncated to (-2, 2);
    # dividing by it renormalizes the truncated draw to the target std
    stddev = math.sqrt(var) * 0.87962566103423978
    trunc_normal_(tensor)
    tensor.set_value(tensor * stddev)
    return tensor


class Region3D:
    """A triclinic simulation box with physical <-> internal coordinate maps."""

    def __init__(self, boxt):
        """Construct a simulation box from a row-major 3x3 cell tensor."""
        boxt = boxt.reshape([3, 3])
        self.boxt = boxt  # convert physical coordinates to internal ones
        self.rec_boxt = paddle.linalg.inv(
            self.boxt
        )  # convert internal coordinates to physical ones

        self.volume = paddle.linalg.det(self.boxt)  # compute the volume

        # to-face distances: volume divided by the area of each opposite face
        # (decomp.norm is used in place of paddle.linalg.norm)
        c_yz = paddle.cross(boxt[1], boxt[2])
        self._h2yz = self.volume / decomp.norm(c_yz)
        c_zx = paddle.cross(boxt[2], boxt[0])
        self._h2zx = self.volume / decomp.norm(c_zx)
        c_xy = paddle.cross(boxt[0], boxt[1])
        self._h2xy = self.volume / decomp.norm(c_xy)

    def phys2inter(self, coord):
        """Convert physical coordinates to internal ones."""
        return coord @ self.rec_boxt

    def inter2phys(self, coord):
        """Convert internal coordinates to physical ones."""
        return coord @ self.boxt

    def get_face_distance(self):
        """Return face distances to each surface of YZ, ZX, XY."""
        return paddle.stack([self._h2yz, self._h2zx, self._h2xy])


def normalize_coord(coord, region: Region3D, nloc: int):
    """Move outer atoms into the region by periodic wrapping.

    Args:
    - coord: shape is [nloc*3]
    - region: the simulation box
    - nloc: number of local atoms (kept for interface compatibility; not used)
    """
    tmp_coord = coord.clone()
    inter_cood = paddle.remainder(region.phys2inter(tmp_coord), 1.0)
    tmp_coord = region.inter2phys(inter_cood)
    return tmp_coord


def compute_serial_cid(cell_offset, ncell):
    """Tell the sequential cell ID in its 3D space.

    Args:
    - cell_offset: shape is [n, 3]
    - ncell: shape is [3]

    Returns the flattened (row-major) cell index for each offset.

    Note: unlike the previous version, this no longer mutates ``cell_offset``
    in place; the returned values are identical.
    """
    return (
        cell_offset[:, 0] * (ncell[1] * ncell[2])
        + cell_offset[:, 1] * ncell[2]
        + cell_offset[:, 2]
    )


def compute_pbc_shift(cell_offset, ncell):
    """Tell shift count to move the atom into region."""
    shift = paddle.zeros_like(cell_offset)
    # atoms below the box need positive shifts, atoms at/above ncell negative
    shift = shift + (cell_offset < 0) * -(
        paddle.floor(paddle.divide(cell_offset, ncell))
    )
    shift = shift + (cell_offset >= ncell) * -(
        paddle.floor(paddle.divide((cell_offset - ncell), ncell)) + 1
    )
    # sanity checks: shifted offsets must land inside [0, ncell)
    assert paddle.all(cell_offset + shift * ncell >= 0)
    assert paddle.all(cell_offset + shift * ncell < ncell)
    return shift
def build_inside_clist(coord, region: Region3D, ncell):
    """Build cell list on atoms inside region.

    Args:
    - coord: shape is [nloc*3]
    - region: the simulation box
    - ncell: shape is [3], number of cells along each axis

    Returns
    -------
    a2c: cell id of each atom, shape [nloc]
    lst: list with one tensor of atom indices per cell
    """
    loc_ncell = int(paddle.prod(ncell))  # num of local cells
    nloc = coord.numel() // 3  # num of local atoms
    inter_cell_size = 1.0 / ncell

    inter_cood = region.phys2inter(coord.reshape([-1, 3]))
    cell_offset = paddle.floor(inter_cood / inter_cell_size).to(paddle.int64)
    # numerical error brought by conversion from phys to inter back and force
    # may lead to negative value
    cell_offset[cell_offset < 0] = 0
    # NOTE(review): ``delta`` is computed but never used below
    delta = cell_offset - ncell
    a2c = compute_serial_cid(cell_offset, ncell)  # cell id of atoms
    arange = paddle.arange(0, loc_ncell, 1)  # pylint: disable=no-explicit-dtype,no-explicit-device
    cellid = a2c == arange.unsqueeze(-1)  # one hot cellid
    c2a = cellid.nonzero()
    lst = []
    cnt = 0
    # c2a rows are sorted by cell id, so consecutive slices of length
    # bincount[i] are exactly the atoms of cell i
    bincount = paddle.bincount(a2c, minlength=loc_ncell)
    for i in range(loc_ncell):
        n = bincount[i]
        lst.append(c2a[cnt : cnt + n, 1])
        cnt += n
    return a2c, lst


def append_neighbors(coord, region: Region3D, atype, rcut: float):
    """Make ghost atoms who are valid neighbors.

    Args:
    - coord: shape is [nloc*3]
    - region: the simulation box
    - atype: shape is [nloc]
    - rcut: cutoff radius

    Returns
    -------
    merged_coord_shift: PBC shift of every (local + ghost) atom
    merged_atype: type of every (local + ghost) atom
    merged_mapping: for each merged atom, its source local-atom index
    """
    to_face = region.get_face_distance()

    # compute num and size of local cells
    ncell = paddle.floor(to_face / rcut).to(paddle.int64)
    ncell[ncell == 0] = 1  # at least one cell per axis
    cell_size = to_face / ncell
    ngcell = (
        paddle.floor(rcut / cell_size).to(paddle.int64) + 1
    )  # num of cells out of local, which contain ghost atoms

    # add ghost atoms
    a2c, c2a = build_inside_clist(coord, region, ncell)
    # enumerate every cell coordinate in the halo-extended grid
    xi = paddle.arange(-ngcell[0], ncell[0] + ngcell[0], 1)  # pylint: disable=no-explicit-dtype,no-explicit-device
    yi = paddle.arange(-ngcell[1], ncell[1] + ngcell[1], 1)  # pylint: disable=no-explicit-dtype,no-explicit-device
    zi = paddle.arange(-ngcell[2], ncell[2] + ngcell[2], 1)  # pylint: disable=no-explicit-dtype,no-explicit-device
    xyz = xi.reshape([-1, 1, 1, 1]) * paddle.to_tensor([1, 0, 0], dtype=paddle.int64)  # pylint: disable=no-explicit-device
    xyz = xyz + yi.reshape([1, -1, 1, 1]) * paddle.to_tensor(
        [0, 1, 0], dtype=paddle.int64
    )  # pylint: disable=no-explicit-device
    xyz = xyz + zi.reshape([1, 1, -1, 1]) * paddle.to_tensor(
        [0, 0, 1], dtype=paddle.int64
    )  # pylint: disable=no-explicit-device
    xyz = xyz.reshape([-1, 3])
    # keep only cells OUTSIDE the local box: those are the ghost-image cells
    mask_a = (xyz >= 0).all(axis=-1)
    mask_b = (xyz < ncell).all(axis=-1)
    mask = ~paddle.logical_and(mask_a, mask_b)
    xyz = xyz[mask]  # cell coord
    shift = compute_pbc_shift(xyz, ncell)
    coord_shift = region.inter2phys(shift.to(env.GLOBAL_PD_FLOAT_PRECISION))
    mirrored = shift * ncell + xyz  # in-box cell each ghost cell mirrors
    cid = compute_serial_cid(mirrored, ncell)

    # encode (ghost-cell index, local-atom index) into a single id, then
    # decode: ``tmp`` recovers the ghost-cell index, ``aid`` the atom index
    n_atoms = coord.shape[0]
    aid = [c2a[ci] + i * n_atoms for i, ci in enumerate(cid)]
    aid = paddle.concat(aid)
    tmp = paddle.trunc(paddle.divide(aid, n_atoms))
    aid = aid % n_atoms
    tmp_coord = coord[aid] - coord_shift[tmp]
    tmp_atype = atype[aid]

    # merge local and ghost atoms
    merged_coord = paddle.concat([coord, tmp_coord])
    merged_coord_shift = paddle.concat([paddle.zeros_like(coord), coord_shift[tmp]])
    merged_atype = paddle.concat([atype, tmp_atype])
    merged_mapping = paddle.concat([paddle.arange(atype.numel()), aid])  # pylint: disable=no-explicit-dtype,no-explicit-device
    return merged_coord_shift, merged_atype, merged_mapping


def build_neighbor_list(
    nloc: int, coord, atype, rcut: float, sec, mapping, type_split=True, min_check=False
):
    """For each atom inside region, build its neighbor list.

    Args:
    - nloc: number of local atoms
    - coord: shape is [nall*3]
    - atype: shape is [nall]
    - rcut: cutoff radius
    - sec: cumulative number of neighbor slots per type (last entry = total)
    - mapping: merged-atom index -> local-atom index
    - type_split: whether neighbor slots are grouped per atom type
    - min_check: raise if two atoms are (nearly) coincident

    Returns
    -------
    nlist: neighbor indices into the merged atoms, [nloc, nnei], -1 padded
    nlist_loc: neighbor indices mapped to local atoms, [nloc, nnei], -1 padded
    nlist_type: neighbor atom types, [nloc, nnei], -1 padded
    """
    nall = coord.numel() // 3
    coord = coord.astype(paddle.get_default_dtype())
    # NOTE(review): this list is dead; ``nlist`` is rebound to a tensor below
    nlist = [[] for _ in range(nloc)]
    coord_l = coord.reshape([-1, 1, 3])[:nloc]
    coord_r = coord.reshape([1, -1, 3])
    distance = coord_l - coord_r
    # distance = paddle.linalg.norm(distance, axis=-1)
    distance = decomp.norm(distance, axis=-1)
    # a value strictly larger than any real pair distance, used to push
    # unwanted candidates past the cutoff
    DISTANCE_INF = distance.max().detach() + rcut
    # mask self-distances so an atom is never its own neighbor
    distance[:nloc, :nloc] += paddle.eye(nloc, dtype=paddle.bool) * DISTANCE_INF  # pylint: disable=no-explicit-device
    if min_check:
        if distance.min().abs() < 1e-6:
            raise RuntimeError("Atom dist too close!")
    if not type_split:
        sec = sec[-1:]
    # NOTE(review): ``lst`` is never used
    lst = []
    nlist = paddle.zeros((nloc, sec[-1].item())).long() - 1  # pylint: disable=no-explicit-dtype,no-explicit-device
    nlist_loc = paddle.zeros((nloc, sec[-1].item())).long() - 1  # pylint: disable=no-explicit-dtype,no-explicit-device
    nlist_type = paddle.zeros((nloc, sec[-1].item())).long() - 1  # pylint: disable=no-explicit-dtype,no-explicit-device
    for i, nnei in enumerate(sec):
        if i > 0:
            # sec is cumulative; recover the per-type slot count
            nnei = nnei - sec[i - 1]
        if not type_split:
            tmp = distance
        else:
            # hide atoms of other types behind DISTANCE_INF
            mask = atype.unsqueeze(0) == i
            tmp = distance + (~mask) * DISTANCE_INF
        if tmp.shape[1] >= nnei:
            _sorted, indices = paddle.topk(tmp, nnei, axis=1, largest=False)
        else:
            # when nnei > nall
            indices = paddle.zeros((nloc, nnei)).long() - 1  # pylint: disable=no-explicit-dtype,no-explicit-device
            _sorted = paddle.ones((nloc, nnei)).long() * DISTANCE_INF  # pylint: disable=no-explicit-dtype,no-explicit-device
            _sorted_nnei, indices_nnei = paddle.topk(
                tmp, tmp.shape[1], axis=1, largest=False
            )
            _sorted[:, : tmp.shape[1]] = _sorted_nnei
            indices[:, : tmp.shape[1]] = indices_nnei
        mask = (_sorted < rcut).to(paddle.int64)
        indices_loc = mapping[indices]
        indices = indices * mask + -1 * (1 - mask)  # -1 for padding
        indices_loc = indices_loc * mask + -1 * (1 - mask)  # -1 for padding
        if i == 0:
            start = 0
        else:
            start = sec[i - 1]
        end = min(sec[i], start + indices.shape[1])
        nlist[:, start:end] = indices[:, :nnei]
        nlist_loc[:, start:end] = indices_loc[:, :nnei]
        nlist_type[:, start:end] = atype[indices[:, :nnei]] * mask + -1 * (1 - mask)
    return nlist, nlist_loc, nlist_type


def compute_smooth_weight(distance, rmin: float, rmax: float):
    """Compute smooth weight for descriptor elements.

    Returns 1 for distance <= rmin, 0 for distance >= rmax, and a quintic
    switching polynomial in between (C2-continuous at both ends).
    """
    if rmin >= rmax:
        raise ValueError("rmin should be less than rmax.")
    min_mask = distance <= rmin
    max_mask = distance >= rmax
    mid_mask = paddle.logical_not(paddle.logical_or(min_mask, max_mask))
    uu = (distance - rmin) / (rmax - rmin)
    vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1
    return vv * mid_mask.astype(vv.dtype) + min_mask.astype(vv.dtype)


def make_env_mat(
    coord,
    atype,
    region,
    rcut: Union[float, list],
    sec,
    pbc=True,
    type_split=True,
    min_check=False,
):
    """Based on atom coordinates, return environment matrix.

    Parameters
    ----------
    coord: atom coordinates, [nloc, 3]
    atype: atom types, [nloc]
    region: the simulation box
    rcut: cutoff radius; a list selects hybrid (multi-cutoff) mode
    sec: cumulative neighbor-slot counts (one list per cutoff in hybrid mode)
    pbc: whether to add periodic ghost atoms
    type_split: whether neighbor slots are grouped per atom type
    min_check: raise if two atoms are (nearly) coincident

    Returns
    -------
    nlist: nlist, [nloc, nnei]
    nlist_loc: nlist mapped to local indices, [nloc, nnei]
    nlist_type: types of listed neighbors, [nloc, nnei]
    merged_coord_shift: shift on nall atoms, [nall, 3]
    merged_mapping: mapping from nall index to nloc index, [nall]
    """
    # move outer atoms into cell
    hybrid = isinstance(rcut, list)
    _rcut = rcut
    if hybrid:
        # ghost construction uses the largest cutoff so every list is covered
        _rcut = max(rcut)
    if pbc:
        merged_coord_shift, merged_atype, merged_mapping = append_neighbors(
            coord, region, atype, _rcut
        )
        merged_coord = coord[merged_mapping] - merged_coord_shift
        if merged_coord.shape[0] <= coord.shape[0]:
            log.warning("No ghost atom is added for system ")
    else:
        merged_coord_shift = paddle.zeros_like(coord)
        merged_atype = atype.clone()
        merged_mapping = paddle.arange(atype.numel())  # pylint: disable=no-explicit-dtype,no-explicit-device
        merged_coord = coord.clone()

    # build nlist
    if not hybrid:
        nlist, nlist_loc, nlist_type = build_neighbor_list(
            coord.shape[0],
            merged_coord,
            merged_atype,
            rcut,
            sec,
            merged_mapping,
            type_split=type_split,
            min_check=min_check,
        )
    else:
        # one neighbor list per cutoff
        nlist, nlist_loc, nlist_type = [], [], []
        for ii, single_rcut in enumerate(rcut):
            nlist_tmp, nlist_loc_tmp, nlist_type_tmp = build_neighbor_list(
                coord.shape[0],
                merged_coord,
                merged_atype,
                single_rcut,
                sec[ii],
                merged_mapping,
                type_split=type_split,
                min_check=min_check,
            )
            nlist.append(nlist_tmp)
            nlist_loc.append(nlist_loc_tmp)
            nlist_type.append(nlist_type_tmp)
    return nlist, nlist_loc, nlist_type, merged_coord_shift, merged_mapping
def phys2inter(
    coord: paddle.Tensor,
    cell: paddle.Tensor,
) -> paddle.Tensor:
    """Convert physical coordinates to internal(direct) coordinates.

    Parameters
    ----------
    coord : paddle.Tensor
        physical coordinates of shape [*, na, 3].
    cell : paddle.Tensor
        simulation cell tensor of shape [*, 3, 3].

    Returns
    -------
    inter_coord: paddle.Tensor
        the internal coordinates

    """
    if paddle.in_dynamic_mode():
        # in eager mode a singular cell is tolerated: the result is NaN-filled
        # instead of raising
        try:
            inv_cell = paddle.linalg.inv(cell)
        except Exception:
            inv_cell = paddle.full_like(cell, float("nan"))
            inv_cell.stop_gradient = cell.stop_gradient
    else:
        inv_cell = paddle.linalg.inv(cell)
    return paddle.matmul(coord, inv_cell)


def inter2phys(
    coord: paddle.Tensor,
    cell: paddle.Tensor,
) -> paddle.Tensor:
    """Convert internal(direct) coordinates to physical coordinates.

    Parameters
    ----------
    coord : paddle.Tensor
        internal coordinates of shape [*, na, 3].
    cell : paddle.Tensor
        simulation cell tensor of shape [*, 3, 3].

    Returns
    -------
    phys_coord: paddle.Tensor
        the physical coordinates

    """
    return paddle.matmul(coord, cell)


def to_face_distance(
    cell: paddle.Tensor,
) -> paddle.Tensor:
    """Compute the to-face-distance of the simulation cell.

    Parameters
    ----------
    cell : paddle.Tensor
        simulation cell tensor of shape [*, 3, 3].

    Returns
    -------
    dist: paddle.Tensor
        the to face distances of shape [*, 3]

    """
    batch_shape = cell.shape
    dist = b_to_face_distance(cell.reshape([-1, 3, 3]))
    return dist.reshape(list(batch_shape[:-2]) + [3])  # noqa:RUF005


def _to_face_distance(cell):
    """Single-cell variant of ``b_to_face_distance`` (kept for reference)."""
    # face distance = volume / face area; decomp.norm stands in for
    # paddle.linalg.norm
    volume = paddle.linalg.det(cell)
    face_yz = paddle.cross(cell[1], cell[2])
    dist_yz = volume / decomp.norm(face_yz)
    face_zx = paddle.cross(cell[2], cell[0])
    dist_zx = volume / decomp.norm(face_zx)
    face_xy = paddle.cross(cell[0], cell[1])
    dist_xy = volume / decomp.norm(face_xy)
    return paddle.stack([dist_yz, dist_zx, dist_xy])


def b_to_face_distance(cell):
    """Batched to-face distance for cells of shape [nb, 3, 3].

    Serves as the hand-written batched form of ``_to_face_distance``
    (a paddle.vmap over it would be the equivalent).
    """
    volume = paddle.linalg.det(cell)
    face_yz = paddle.cross(cell[:, 1], cell[:, 2], axis=-1)
    dist_yz = volume / decomp.norm(face_yz, axis=-1)
    face_zx = paddle.cross(cell[:, 2], cell[:, 0], axis=-1)
    dist_zx = volume / decomp.norm(face_zx, axis=-1)
    face_xy = paddle.cross(cell[:, 0], cell[:, 1], axis=-1)
    dist_xy = volume / decomp.norm(face_xy, axis=-1)
    return paddle.stack([dist_yz, dist_zx, dist_xy], axis=1)
+ + """ + icoord = phys2inter(coord, cell) + icoord = paddle.remainder(icoord, paddle.to_tensor(1.0)) + return inter2phys(icoord, cell) diff --git a/deepmd/pd/utils/serialization.py b/deepmd/pd/utils/serialization.py new file mode 100644 index 0000000000..7beb459c0c --- /dev/null +++ b/deepmd/pd/utils/serialization.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + + +def serialize_from_file(model_file: str) -> dict: + """Serialize the model file to a dictionary. + + Parameters + ---------- + model_file : str + The model file to be serialized. + + Returns + ------- + dict + The serialized model data. + """ + raise NotImplementedError("Paddle do not support jit.export yet.") + + +def deserialize_to_file(model_file: str, data: dict) -> None: + """Deserialize the dictionary to a model file. + + Parameters + ---------- + model_file : str + The model file to be saved. + data : dict + The dictionary to be deserialized. + """ + raise NotImplementedError("Paddle do not support jit.export yet.") diff --git a/deepmd/pd/utils/stat.py b/deepmd/pd/utils/stat.py new file mode 100644 index 0000000000..3ecd695038 --- /dev/null +++ b/deepmd/pd/utils/stat.py @@ -0,0 +1,604 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from collections import ( + defaultdict, +) +from typing import ( + Callable, + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.dpmodel.output_def import ( + FittingOutputDef, +) +from deepmd.pd.utils import ( + AtomExcludeMask, +) +from deepmd.pd.utils.auto_batch_size import ( + AutoBatchSize, +) +from deepmd.pd.utils.utils import ( + dict_to_device, + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.out_stat import ( + compute_stats_from_atomic, + compute_stats_from_redu, +) +from deepmd.utils.path import ( + DPPath, +) + +log = logging.getLogger(__name__) + + +def make_stat_input(datasets, dataloaders, nbatches): + """Pack data for statistics. + + Args: + - dataset: A list of dataset to analyze. 
+ - nbatches: Batch count for collecting stats. + + Returns + ------- + - a list of dicts, each of which contains data from a system + """ + lst = [] + log.info(f"Packing data for statistics from {len(datasets)} systems") + for i in range(len(datasets)): + sys_stat = {} + + iterator = iter(dataloaders[i]) + numb_batches = min(nbatches, len(dataloaders[i])) + for _ in range(numb_batches): + try: + stat_data = next(iterator) + except StopIteration: + iterator = iter(dataloaders[i]) + stat_data = next(iterator) + for dd in stat_data: + if stat_data[dd] is None: + sys_stat[dd] = None + elif isinstance(stat_data[dd], paddle.Tensor): + if dd not in sys_stat: + sys_stat[dd] = [] + sys_stat[dd].append(stat_data[dd]) + elif isinstance(stat_data[dd], np.float32): + sys_stat[dd] = stat_data[dd] + else: + pass + + for key in sys_stat: + if isinstance(sys_stat[key], np.float32): + pass + elif sys_stat[key] is None or sys_stat[key][0] is None: + sys_stat[key] = None + elif isinstance(stat_data[dd], paddle.Tensor): + sys_stat[key] = paddle.concat(sys_stat[key], axis=0) + dict_to_device(sys_stat) + lst.append(sys_stat) + return lst + + +def _restore_from_file( + stat_file_path: DPPath, + keys: list[str] = ["energy"], +) -> Optional[dict]: + if stat_file_path is None: + return None, None + stat_files = [stat_file_path / f"bias_atom_{kk}" for kk in keys] + if all(not (ii.is_file()) for ii in stat_files): + return None, None + stat_files = [stat_file_path / f"std_atom_{kk}" for kk in keys] + if all(not (ii.is_file()) for ii in stat_files): + return None, None + + ret_bias = {} + ret_std = {} + for kk in keys: + fp = stat_file_path / f"bias_atom_{kk}" + # only read the key that exists + if fp.is_file(): + ret_bias[kk] = fp.load_numpy() + for kk in keys: + fp = stat_file_path / f"std_atom_{kk}" + # only read the key that exists + if fp.is_file(): + ret_std[kk] = fp.load_numpy() + return ret_bias, ret_std + + +def _save_to_file( + stat_file_path: DPPath, + bias_out: dict, + std_out: 
dict, +): + assert stat_file_path is not None + stat_file_path.mkdir(exist_ok=True, parents=True) + for kk, vv in bias_out.items(): + fp = stat_file_path / f"bias_atom_{kk}" + fp.save_numpy(vv) + for kk, vv in std_out.items(): + fp = stat_file_path / f"std_atom_{kk}" + fp.save_numpy(vv) + + +def _post_process_stat( + out_bias, + out_std, +): + """Post process the statistics. + + For global statistics, we do not have the std for each type of atoms, + thus fake the output std by ones for all the types. + + """ + new_std = {} + for kk, vv in out_bias.items(): + new_std[kk] = np.ones_like(vv) + return out_bias, new_std + + +def _compute_model_predict( + sampled: Union[Callable[[], list[dict]], list[dict]], + keys: list[str], + model_forward: Callable[..., paddle.Tensor], +): + auto_batch_size = AutoBatchSize() + model_predict = {kk: [] for kk in keys} + for system in sampled: + nframes = system["coord"].shape[0] + coord, atype, box, natoms = ( + system["coord"], + system["atype"], + system["box"], + system["natoms"], + ) + fparam = system.get("fparam", None) + aparam = system.get("aparam", None) + + def model_forward_auto_batch_size(*args, **kwargs): + return auto_batch_size.execute_all( + model_forward, + nframes, + system["atype"].shape[-1], + *args, + **kwargs, + ) + + sample_predict = model_forward_auto_batch_size( + coord, atype, box, fparam=fparam, aparam=aparam + ) + for kk in keys: + model_predict[kk].append( + to_numpy_array( + sample_predict[kk] # nf x nloc x odims + ) + ) + return model_predict + + +def _make_preset_out_bias( + ntypes: int, + ibias: list[Optional[np.array]], +) -> Optional[np.array]: + """Make preset out bias. + + output: + a np array of shape [ntypes, *(odim0, odim1, ...)] is any item is not None + None if all items are None. 
+ """ + if len(ibias) != ntypes: + raise ValueError("the length of preset bias list should be ntypes") + if all(ii is None for ii in ibias): + return None + for refb in ibias: + if refb is not None: + break + refb = np.array(refb) + nbias = [ + np.full_like(refb, np.nan, dtype=np.float64) if ii is None else ii + for ii in ibias + ] + return np.array(nbias) + + +def _fill_stat_with_global( + atomic_stat: Union[np.ndarray, None], + global_stat: np.ndarray, +): + """This function is used to fill atomic stat with global stat. + + Parameters + ---------- + atomic_stat : Union[np.ndarray, None] + The atomic stat. + global_stat : np.ndarray + The global stat. + if the atomic stat is None, use global stat. + if the atomic stat is not None, but has nan values (missing atypes), fill with global stat. + """ + if atomic_stat is None: + return global_stat + else: + atomic_stat = atomic_stat.reshape(global_stat.shape) + return np.nan_to_num( + np.where( + np.isnan(atomic_stat) & ~np.isnan(global_stat), global_stat, atomic_stat + ) + ) + + +def compute_output_stats( + merged: Union[Callable[[], list[dict]], list[dict]], + ntypes: int, + keys: Union[str, list[str]] = ["energy"], + stat_file_path: Optional[DPPath] = None, + rcond: Optional[float] = None, + preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, + model_forward: Optional[Callable[..., paddle.Tensor]] = None, + atomic_output: Optional[FittingOutputDef] = None, +): + """ + Compute the output statistics (e.g. energy bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], List[dict]], List[dict]] + - List[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + only when needed. 
def compute_output_stats(
    merged: Union[Callable[[], list[dict]], list[dict]],
    ntypes: int,
    keys: Union[str, list[str]] = ["energy"],
    stat_file_path: Optional[DPPath] = None,
    rcond: Optional[float] = None,
    preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None,
    model_forward: Optional[Callable[..., paddle.Tensor]] = None,
    atomic_output: Optional[FittingOutputDef] = None,
):
    """
    Compute the output statistics (e.g. energy bias) for the fitting net from packed data.

    Parameters
    ----------
    merged : Union[Callable[[], List[dict]], List[dict]]
        - List[dict]: A list of data samples from various data systems.
            Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor`
            originating from the `i`-th data system.
        - Callable[[], List[dict]]: A lazy function that returns data samples in the above format
            only when needed. Since the sampling process can be slow and memory-intensive,
            the lazy function helps by only sampling once.
    ntypes : int
        The number of atom types.
    keys : Union[str, List[str]]
        The output key(s) to compute statistics for.
    stat_file_path : DPPath, optional
        The path to the stat file.
    rcond : float, optional
        The condition number for the regression of atomic energy.
    preset_bias : Dict[str, List[Optional[paddle.Tensor]]], optional
        Specifying atomic energy contribution in vacuum. Given by key:value pairs.
        The value is a list specifying the bias. the elements can be None or np.array of output shape.
        For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.]
        The `set_davg_zero` key in the descriptor should be set.
    model_forward : Callable[..., paddle.Tensor], optional
        The wrapped forward function of atomic model.
        If not None, the model will be utilized to generate the original energy prediction,
        which will be subtracted from the energy label of the data.
        The difference will then be used to calculate the delta complement energy bias for each type.
    atomic_output : FittingOutputDef, optional
        The output of atomic model.
    """
    # NOTE(review): a bare-str ``keys`` reaches _restore_from_file before the
    # list normalization below, where it would be iterated character by
    # character — confirm callers always pass a list.
    # try to restore the bias from stat file
    bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys)

    # failed to restore the bias from stat file; compute it from the data
    if bias_atom_e is None:
        # only get data once, sampled is a list of dict[str, paddle.Tensor]
        sampled = merged() if callable(merged) else merged
        if model_forward is not None:
            model_pred = _compute_model_predict(sampled, keys, model_forward)
        else:
            model_pred = None

        # remove the keys that are not in the sample
        keys = [keys] if isinstance(keys, str) else keys
        assert isinstance(keys, list)
        new_keys = [
            ii
            for ii in keys
            if (ii in sampled[0].keys()) or ("atom_" + ii in sampled[0].keys())
        ]
        del keys
        keys = new_keys
        # split system based on label: a system goes to the atomic pool when a
        # per-atom label is present, otherwise to the global pool
        atomic_sampled_idx = defaultdict(list)
        global_sampled_idx = defaultdict(list)

        for kk in keys:
            for idx, system in enumerate(sampled):
                if (("find_atom_" + kk) in system) and (
                    system["find_atom_" + kk] > 0.0
                ):
                    atomic_sampled_idx[kk].append(idx)
                elif (("find_" + kk) in system) and (system["find_" + kk] > 0.0):
                    global_sampled_idx[kk].append(idx)

                else:
                    continue

        # use index to gather model predictions for the corresponding systems.

        model_pred_g = (
            {
                kk: [
                    np.sum(vv[idx], axis=1) for idx in global_sampled_idx[kk]
                ]  # sum atomic dim
                for kk, vv in model_pred.items()
            }
            if model_pred
            else None
        )
        model_pred_a = (
            {
                kk: [vv[idx] for idx in atomic_sampled_idx[kk]]
                for kk, vv in model_pred.items()
            }
            if model_pred
            else None
        )

        # concat all frames within those systems
        model_pred_g = (
            {
                kk: np.concatenate(model_pred_g[kk])
                for kk in model_pred_g.keys()
                if len(model_pred_g[kk]) > 0
            }
            if model_pred
            else None
        )
        model_pred_a = (
            {
                kk: np.concatenate(model_pred_a[kk])
                for kk in model_pred_a.keys()
                if len(model_pred_a[kk]) > 0
            }
            if model_pred
            else None
        )

        # compute stat
        bias_atom_g, std_atom_g = compute_output_stats_global(
            sampled,
            ntypes,
            keys,
            rcond,
            preset_bias,
            model_pred_g,
            atomic_output,
        )
        bias_atom_a, std_atom_a = compute_output_stats_atomic(
            sampled,
            ntypes,
            keys,
            model_pred_a,
        )

        # merge global/atomic bias
        bias_atom_e, std_atom_e = {}, {}
        for kk in keys:
            # use atomic bias whenever available
            if kk in bias_atom_a:
                bias_atom_e[kk] = bias_atom_a[kk]
                std_atom_e[kk] = std_atom_a[kk]
            else:
                bias_atom_e[kk] = None
                std_atom_e[kk] = None
            # use global bias to fill missing atomic bias
            if kk in bias_atom_g:
                bias_atom_e[kk] = _fill_stat_with_global(
                    bias_atom_e[kk], bias_atom_g[kk]
                )
                std_atom_e[kk] = _fill_stat_with_global(std_atom_e[kk], std_atom_g[kk])
            if (bias_atom_e[kk] is None) or (std_atom_e[kk] is None):
                raise RuntimeError("Fail to compute stat.")

        if stat_file_path is not None:
            _save_to_file(stat_file_path, bias_atom_e, std_atom_e)

    # hand the (restored or freshly computed) numpy stats back as paddle tensors
    bias_atom_e = {kk: to_paddle_tensor(vv) for kk, vv in bias_atom_e.items()}
    std_atom_e = {kk: to_paddle_tensor(vv) for kk, vv in std_atom_e.items()}
    return bias_atom_e, std_atom_e
Optional[dict[str, list[Optional[paddle.Tensor]]]] = None, + model_pred: Optional[dict[str, np.ndarray]] = None, + atomic_output: Optional[FittingOutputDef] = None, +): + """This function only handles stat computation from reduced global labels.""" + # return directly if model prediction is empty for global + if model_pred == {}: + return {}, {} + + # get label dict from sample; for each key, only picking the system with global labels. + outputs = { + kk: [ + system[kk] + for system in sampled + if kk in system and system.get(f"find_{kk}", 0) > 0 + ] + for kk in keys + } + + data_mixed_type = "real_natoms_vec" in sampled[0] + natoms_key = "natoms" if not data_mixed_type else "real_natoms_vec" + for system in sampled: + if "atom_exclude_types" in system: + type_mask = AtomExcludeMask( + ntypes, system["atom_exclude_types"] + ).get_type_mask() + system[natoms_key][:, 2:] *= type_mask.unsqueeze(0) + + input_natoms = { + kk: [ + item[natoms_key] + for item in sampled + if kk in item and item.get(f"find_{kk}", 0) > 0 + ] + for kk in keys + } + # shape: (nframes, ndim) + merged_output = { + kk: to_numpy_array(paddle.concat(outputs[kk])) + for kk in keys + if len(outputs[kk]) > 0 + } + # shape: (nframes, ntypes) + + merged_natoms = { + kk: to_numpy_array(paddle.concat(input_natoms[kk])[:, 2:]) + for kk in keys + if len(input_natoms[kk]) > 0 + } + nf = {kk: merged_natoms[kk].shape[0] for kk in keys if kk in merged_natoms} + if preset_bias is not None: + assigned_atom_ener = { + kk: _make_preset_out_bias(ntypes, preset_bias[kk]) + if kk in preset_bias.keys() + else None + for kk in keys + } + else: + assigned_atom_ener = {kk: None for kk in keys} + + if model_pred is None: + stats_input = merged_output + else: + # subtract the model bias and output the delta bias + + stats_input = { + kk: merged_output[kk] - model_pred[kk] for kk in keys if kk in merged_output + } + + bias_atom_e = {} + std_atom_e = {} + for kk in keys: + if kk in stats_input: + if atomic_output is not None and 
atomic_output.get_data()[kk].intensive: + task_dim = stats_input[kk].shape[1] + assert merged_natoms[kk].shape == (nf[kk], ntypes) + stats_input[kk] = ( + merged_natoms[kk].sum(axis=1).reshape([-1, 1]) * stats_input[kk] + ) + assert stats_input[kk].shape == (nf[kk], task_dim) + bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( + stats_input[kk], + merged_natoms[kk], + assigned_bias=assigned_atom_ener[kk], + rcond=rcond, + ) + else: + # this key does not have global labels, skip it. + continue + bias_atom_e, std_atom_e = _post_process_stat(bias_atom_e, std_atom_e) + + # unbias_e is only used for print rmse + + if model_pred is None: + unbias_e = { + kk: merged_natoms[kk] @ bias_atom_e[kk].reshape([ntypes, -1]) + for kk in bias_atom_e.keys() + } + else: + unbias_e = { + kk: model_pred[kk].reshape([nf[kk], -1]) + + merged_natoms[kk] @ bias_atom_e[kk].reshape([ntypes, -1]) + for kk in bias_atom_e.keys() + } + atom_numbs = {kk: merged_natoms[kk].sum(-1) for kk in bias_atom_e.keys()} + + def rmse(x): + return np.sqrt(np.mean(np.square(x))) + + for kk in bias_atom_e.keys(): + rmse_ae = rmse( + ( + unbias_e[kk].reshape([nf[kk], -1]).astype(merged_output[kk].dtype) + - merged_output[kk].reshape([nf[kk], -1]) + ) + / atom_numbs[kk][:, None].astype(merged_output[kk].dtype) + ) + log.info( + f"RMSE of {kk} per atom after linear regression is: {rmse_ae} in the unit of {kk}." + ) + return bias_atom_e, std_atom_e + + +def compute_output_stats_atomic( + sampled: list[dict], + ntypes: int, + keys: list[str], + model_pred: Optional[dict[str, np.ndarray]] = None, +): + # get label dict from sample; for each key, only picking the system with atomic labels. 
+ outputs = { + kk: [ + system["atom_" + kk] + for system in sampled + if ("atom_" + kk) in system and system.get(f"find_atom_{kk}", 0) > 0 + ] + for kk in keys + } + natoms = { + kk: [ + system["atype"] + for system in sampled + if ("atom_" + kk) in system and system.get(f"find_atom_{kk}", 0) > 0 + ] + for kk in keys + } + # shape: (nframes, nloc, ndim) + merged_output = { + kk: to_numpy_array(paddle.concat(outputs[kk])) + for kk in keys + if len(outputs[kk]) > 0 + } + merged_natoms = { + kk: to_numpy_array(paddle.concat(natoms[kk])) + for kk in keys + if len(natoms[kk]) > 0 + } + # reshape merged data to [nf, nloc, ndim] + merged_output = { + kk: merged_output[kk].reshape((*merged_natoms[kk].shape, -1)) + for kk in merged_output + } + + if model_pred is None: + stats_input = merged_output + else: + # subtract the model bias and output the delta bias + stats_input = { + kk: merged_output[kk] - model_pred[kk].reshape(merged_output[kk].shape) + for kk in keys + if kk in merged_output + } + + bias_atom_e = {} + std_atom_e = {} + + for kk in keys: + if kk in stats_input: + bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_atomic( + stats_input[kk], + merged_natoms[kk], + ) + # correction for missing types + missing_types = ntypes - merged_natoms[kk].max() - 1 + if missing_types > 0: + assert ( + bias_atom_e[kk].dtype is std_atom_e[kk].dtype + ), "bias and std should be of the same dtypes" + nan_padding = np.empty( + (missing_types, bias_atom_e[kk].shape[1]), + dtype=bias_atom_e[kk].dtype, + ) + nan_padding.fill(np.nan) + bias_atom_e[kk] = np.concatenate([bias_atom_e[kk], nan_padding], axis=0) + std_atom_e[kk] = np.concatenate([std_atom_e[kk], nan_padding], axis=0) + else: + # this key does not have atomic labels, skip it. 
+ continue + return bias_atom_e, std_atom_e diff --git a/deepmd/pd/utils/update_sel.py b/deepmd/pd/utils/update_sel.py new file mode 100644 index 0000000000..32b8d66c73 --- /dev/null +++ b/deepmd/pd/utils/update_sel.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +from deepmd.pd.utils.neighbor_stat import ( + NeighborStat, +) +from deepmd.utils.update_sel import ( + BaseUpdateSel, +) + + +class UpdateSel(BaseUpdateSel): + @property + def neighbor_stat(self) -> type[NeighborStat]: + return NeighborStat diff --git a/deepmd/pd/utils/utils.py b/deepmd/pd/utils/utils.py new file mode 100644 index 0000000000..b38ad9e887 --- /dev/null +++ b/deepmd/pd/utils/utils.py @@ -0,0 +1,176 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from __future__ import ( + annotations, +) + +from typing import ( + overload, +) + +import ml_dtypes +import numpy as np +import paddle +import paddle.nn.functional as F + +from deepmd.dpmodel.common import PRECISION_DICT as NP_PRECISION_DICT +from deepmd.pd.model.network.init import ( + PaddleGenerator, +) + +from .env import ( + DEVICE, +) +from .env import PRECISION_DICT as PD_PRECISION_DICT + + +class ActivationFn(paddle.nn.Layer): + def __init__(self, activation: str | None): + super().__init__() + self.activation: str = activation if activation is not None else "linear" + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Returns the tensor after applying activation function corresponding to `activation`.""" + if self.activation.lower() == "relu": + return F.relu(x) + elif self.activation.lower() == "gelu" or self.activation.lower() == "gelu_tf": + return F.gelu(x, approximate=True) + elif self.activation.lower() == "tanh": + return paddle.tanh(x) + elif self.activation.lower() == "relu6": + return F.relu6(x) + elif self.activation.lower() == "softplus": + return F.softplus(x) + elif self.activation.lower() == "sigmoid": + return F.sigmoid(x) + elif self.activation.lower() == "linear" or self.activation.lower() 
== "none": + return x + else: + raise RuntimeError(f"activation function {self.activation} not supported") + + +@overload +def to_numpy_array(xx: paddle.Tensor) -> np.ndarray: ... + + +@overload +def to_numpy_array(xx: None) -> None: ... + + +def to_numpy_array( + xx, +): + if xx is None: + return None + assert xx is not None + # Create a reverse mapping of PD_PRECISION_DICT + reverse_precision_dict = {v: k for k, v in PD_PRECISION_DICT.items()} + # Use the reverse mapping to find keys with the desired value + prec = reverse_precision_dict.get(xx.dtype, None) + prec = NP_PRECISION_DICT.get(prec, np.float64) + if prec is None: + raise ValueError(f"unknown precision {xx.dtype}") + if isinstance(xx, np.ndarray): + return xx.astype(prec) + if xx.dtype == paddle.bfloat16: + xx = xx.astype(paddle.get_default_dtype()) + return xx.numpy().astype(prec) + + +@overload +def to_paddle_tensor(xx: np.ndarray) -> paddle.Tensor: ... + + +@overload +def to_paddle_tensor(xx: None) -> None: ... + + +def to_paddle_tensor( + xx, +): + if xx is None: + return None + assert xx is not None + if not isinstance(xx, np.ndarray): + return xx + # Create a reverse mapping of NP_PRECISION_DICT + reverse_precision_dict = {v: k for k, v in NP_PRECISION_DICT.items()} + # Use the reverse mapping to find keys with the desired value + prec = reverse_precision_dict.get(xx.dtype.type, None) + prec = PD_PRECISION_DICT.get(prec, None) + if prec is None: + raise ValueError(f"unknown precision {xx.dtype}") + if xx.dtype == ml_dtypes.bfloat16: + xx = xx.astype(np.float32) + return paddle.to_tensor(xx, dtype=prec, place=DEVICE) + + +def dict_to_device(sample_dict): + for key in sample_dict: + if isinstance(sample_dict[key], list): + sample_dict[key] = [item.to(DEVICE) for item in sample_dict[key]] + if isinstance(sample_dict[key], np.float32): + sample_dict[key] = ( + paddle.ones(1, dtype=paddle.float32).to(device=DEVICE) + * sample_dict[key] + ) + else: + if sample_dict[key] is not None: + sample_dict[key] = 
sample_dict[key].to(DEVICE) + + +# https://github.com/numpy/numpy/blob/a4cddb60489f821a1a4dffc16cd5c69755d43bdb/numpy/random/bit_generator.pyx#L58-L63 +INIT_A = 0x43B0D7E5 +MULT_A = 0x931E8875 +MIX_MULT_L = 0xCA01F9DD +MIX_MULT_R = 0x4973F715 +XSHIFT = 16 + + +def hashmix(value: int, hash_const: list[int]): + value ^= INIT_A + hash_const[0] *= MULT_A + value *= INIT_A + # prevent overflow + hash_const[0] &= 0xFFFF_FFFF_FFFF_FFFF + value &= 0xFFFF_FFFF_FFFF_FFFF + value ^= value >> XSHIFT + return value + + +def mix(x: int, y: int): + result = MIX_MULT_L * x - MIX_MULT_R * y + # prevent overflow + result &= 0xFFFF_FFFF_FFFF_FFFF + result ^= result >> XSHIFT + return result + + +def mix_entropy(entropy_array: list[int]) -> int: + # https://github.com/numpy/numpy/blob/a4cddb60489f821a1a4dffc16cd5c69755d43bdb/numpy/random/bit_generator.pyx#L341-L374 + hash_const = [INIT_A] + mixer = hashmix(entropy_array[0], hash_const) + for i_src in range(1, len(entropy_array)): + mixer = mix(mixer, hashmix(entropy_array[i_src], hash_const)) + return mixer + + +def get_generator( + seed: int | list[int] | None = None, +) -> PaddleGenerator | None: + if seed is not None: + if isinstance(seed, list): + seed = mix_entropy(seed) + if DEVICE == "cpu": + generator = paddle.framework.core.default_cpu_generator() + elif DEVICE == "gpu": + generator = paddle.framework.core.default_cuda_generator(0) + elif DEVICE.startswith("gpu:"): + generator = paddle.framework.core.default_cuda_generator( + int(DEVICE.split("gpu:")[1]) + ) + else: + raise ValueError("DEVICE should be cpu or gpu or gpu:x") + generator.manual_seed(seed) + return generator + else: + return None diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index edafc7c02e..33dcba3acb 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -35,9 +35,10 @@ INSTALLED_TF = Backend.get_backend("tensorflow")().is_available() INSTALLED_PT = 
Backend.get_backend("pytorch")().is_available() +INSTALLED_PD = Backend.get_backend("paddle")().is_available() -if os.environ.get("CI") and not (INSTALLED_TF and INSTALLED_PT): - raise ImportError("TensorFlow or PyTorch should be tested in the CI") +if os.environ.get("CI") and not (INSTALLED_TF and INSTALLED_PT and INSTALLED_PD): + raise ImportError("TensorFlow, PyTorch or Paddle should be tested in the CI") if INSTALLED_TF: @@ -57,6 +58,7 @@ "CommonTest", "INSTALLED_TF", "INSTALLED_PT", + "INSTALLED_PD", ] @@ -71,6 +73,8 @@ class CommonTest(ABC): """Native DP model class.""" pt_class: ClassVar[Optional[type]] """PyTorch model class.""" + pd_class: ClassVar[Optional[type]] + """Paddle model class.""" args: ClassVar[Optional[Union[Argument, List[Argument]]]] """Arguments that maps to the `data`.""" skip_dp: ClassVar[bool] = False @@ -79,6 +83,8 @@ class CommonTest(ABC): """Whether to skip the TensorFlow model.""" skip_pt: ClassVar[bool] = not INSTALLED_PT """Whether to skip the PyTorch model.""" + skip_pd: ClassVar[bool] = not INSTALLED_PD + """Whether to skip the Paddle model.""" rtol = 1e-10 """Relative tolerance for comparing the return value. Override for float32.""" atol = 1e-10 @@ -149,12 +155,23 @@ def eval_pt(self, pt_obj: Any) -> Any: The object of PT """ + @abstractmethod + def eval_pd(self, pd_obj: Any) -> Any: + """Evaluate the return value of PD. + + Parameters + ---------- + pd_obj : Any + The object of PD + """ + class RefBackend(Enum): """Reference backend.""" TF = 1 DP = 2 PT = 3 + PD = 4 @abstractmethod def extract_ret(self, ret: Any, backend: RefBackend) -> Tuple[np.ndarray, ...]: @@ -215,6 +232,11 @@ def get_dp_ret_serialization_from_cls(self, obj): data = obj.serialize() return ret, data + def get_pd_ret_serialization_from_cls(self, obj): + ret = self.eval_pd(obj) + data = obj.serialize() + return ret, data + def get_reference_backend(self): """Get the reference backend. 
@@ -226,6 +248,8 @@ def get_reference_backend(self): return self.RefBackend.TF if not self.skip_pt: return self.RefBackend.PT + if not self.skip_pd: + return self.RefBackend.PD raise ValueError("No available reference") def get_reference_ret_serialization(self, ref: RefBackend): @@ -239,6 +263,9 @@ def get_reference_ret_serialization(self, ref: RefBackend): if ref == self.RefBackend.PT: obj = self.init_backend_cls(self.pt_class) return self.get_pt_ret_serialization_from_cls(obj) + if ref == self.RefBackend.PD: + obj = self.init_backend_cls(self.pd_class) + return self.get_pd_ret_serialization_from_cls(obj) raise ValueError("No available reference") def test_tf_consistent_with_ref(self): @@ -359,6 +386,45 @@ def test_pt_self_consistent(self): else: self.assertEqual(rr1, rr2) + def test_pd_consistent_with_ref(self): + """Test whether PD and reference are consistent.""" + if self.skip_pt: + self.skipTest("Unsupported backend") + ref_backend = self.get_reference_backend() + if ref_backend == self.RefBackend.PD: + self.skipTest("Reference is self") + ret1, data1 = self.get_reference_ret_serialization(ref_backend) + ret1 = self.extract_ret(ret1, ref_backend) + obj = self.pt_class.deserialize(data1) + ret2 = self.eval_pt(obj) + ret2 = self.extract_ret(ret2, self.RefBackend.PD) + data2 = obj.serialize() + if obj.__class__.__name__.startswith(("Polar", "Dipole", "DOS")): + # tf, pd serialization mismatch + common_keys = set(data1.keys()) & set(data2.keys()) + data1 = {k: data1[k] for k in common_keys} + data2 = {k: data2[k] for k in common_keys} + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + + def test_pd_self_consistent(self): + """Test whether PT is self consistent.""" + if self.skip_pd: + self.skipTest("Unsupported backend") + obj1 = self.init_backend_cls(self.pd_class) + ret1, data1 = 
self.get_pd_ret_serialization_from_cls(obj1) + obj2 = self.pd_class.deserialize(data1) + ret2, data2 = self.get_pd_ret_serialization_from_cls(obj2) + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + else: + self.assertEqual(rr1, rr2) + def tearDown(self) -> None: """Clear the TF session.""" if not self.skip_tf: diff --git a/source/tests/consistent/descriptor/common.py b/source/tests/consistent/descriptor/common.py index 74fc3d9b07..a78e8dc9b6 100644 --- a/source/tests/consistent/descriptor/common.py +++ b/source/tests/consistent/descriptor/common.py @@ -12,6 +12,7 @@ ) from ..common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, ) @@ -29,6 +30,14 @@ GLOBAL_TF_FLOAT_PRECISION, tf, ) +if INSTALLED_PD: + import paddle + + from deepmd.pd.utils.env import DEVICE as PD_DEVICE + from deepmd.pd.utils.nlist import build_neighbor_list as build_neighbor_list_pd + from deepmd.pd.utils.nlist import ( + extend_coord_with_ghosts as extend_coord_with_ghosts_pd, + ) class DescriptorTest: @@ -99,3 +108,25 @@ def eval_pt_descriptor( x.detach().cpu().numpy() if torch.is_tensor(x) else x for x in pt_obj(ext_coords, ext_atype, nlist=nlist, mapping=mapping) ] + + def eval_pd_descriptor( + self, pd_obj: Any, natoms, coords, atype, box, mixed_types: bool = False + ) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts_pd( + paddle.to_tensor(coords).to(PD_DEVICE).reshape([1, -1, 3]), + paddle.to_tensor(atype).to(PD_DEVICE).reshape([1, -1]), + paddle.to_tensor(box).to(PD_DEVICE).reshape([1, 3, 3]), + pd_obj.get_rcut(), + ) + nlist = build_neighbor_list_pd( + ext_coords, + ext_atype, + natoms[0], + pd_obj.get_rcut(), + pd_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + return [ + x.detach().cpu().numpy() if paddle.is_tensor(x) else x + 
for x in pd_obj(ext_coords, ext_atype, nlist=nlist, mapping=mapping) + ] diff --git a/source/tests/consistent/descriptor/test_se_e2_a.py b/source/tests/consistent/descriptor/test_se_e2_a.py index 1e3e5ae86d..255f0e6cd6 100644 --- a/source/tests/consistent/descriptor/test_se_e2_a.py +++ b/source/tests/consistent/descriptor/test_se_e2_a.py @@ -13,6 +13,7 @@ ) from ..common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -30,6 +31,10 @@ from deepmd.tf.descriptor.se_a import DescrptSeA as DescrptSeATF else: DescrptSeATF = None +if INSTALLED_PD: + from deepmd.pd.model.descriptor.se_a import DescrptSeA as DescrptSeAPD +else: + DescrptSeAPD = None from deepmd.utils.argcheck import ( descrpt_se_a_args, ) @@ -99,9 +104,21 @@ def skip_tf(self) -> bool: ) = self.param return env_protection != 0.0 + @property + def skip_pd(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + precision, + env_protection, + ) = self.param + return CommonTest.skip_pd + tf_class = DescrptSeATF dp_class = DescrptSeADP pt_class = DescrptSeAPT + pd_class = DescrptSeAPD args = descrpt_se_a_args() def setUp(self): @@ -178,6 +195,15 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: return (ret[0],) diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py index bdd4b7cf81..ee122e712f 100644 --- a/source/tests/consistent/fitting/common.py +++ b/source/tests/consistent/fitting/common.py @@ -2,6 +2,7 @@ from ..common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, ) @@ -13,6 +14,8 @@ GLOBAL_TF_FLOAT_PRECISION, tf, ) +if INSTALLED_PD: + pass class FittingTest: diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 157b1bab8a..2596f93893 100644 --- 
a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -13,6 +13,7 @@ ) from ..common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -33,6 +34,13 @@ from deepmd.tf.fit.ener import EnerFitting as EnerFittingTF else: EnerFittingTF = object +if INSTALLED_PD: + import paddle + + from deepmd.pd.model.task.ener import EnergyFittingNet as EnerFittingPD + from deepmd.pd.utils.env import DEVICE as PD_DEVICE +else: + EnerFittingPD = object from deepmd.utils.argcheck import ( fitting_ener, ) @@ -75,9 +83,21 @@ def skip_pt(self) -> bool: ) = self.param return CommonTest.skip_pt + @property + def skip_pd(self) -> bool: + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + atom_ener, + ) = self.param + return CommonTest.skip_pd + tf_class = EnerFittingTF dp_class = EnerFittingDP pt_class = EnerFittingPT + pd_class = EnerFittingPD args = fitting_ener() def setUp(self): @@ -158,6 +178,28 @@ def eval_dp(self, dp_obj: Any) -> Any: fparam=self.fparam if numb_fparam else None, )["energy"] + @unittest.skip("cublas bf16 gemm requires GPU compute capability >= 80 in Paddle") + def eval_pd(self, pd_obj: Any) -> Any: + ( + resnet_dt, + precision, + mixed_types, + numb_fparam, + atom_ener, + ) = self.param + return ( + pd_obj( + paddle.to_tensor(self.inputs).to(device=PD_DEVICE), + paddle.to_tensor(self.atype.reshape([1, -1])).to(device=PD_DEVICE), + fparam=paddle.to_tensor(self.fparam).to(device=PD_DEVICE) + if numb_fparam + else None, + )["energy"] + .detach() + .cpu() + .numpy() + ) + def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: if backend == self.RefBackend.TF: # shape is not same diff --git a/source/tests/consistent/model/common.py b/source/tests/consistent/model/common.py index 294edec1d6..03c0039d92 100644 --- a/source/tests/consistent/model/common.py +++ b/source/tests/consistent/model/common.py @@ -8,6 +8,7 @@ ) from ..common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, ) @@ 
-20,6 +21,9 @@ GLOBAL_TF_FLOAT_PRECISION, tf, ) +if INSTALLED_PD: + from deepmd.pd.utils.utils import to_numpy_array as paddle_to_numpy + from deepmd.pd.utils.utils import to_paddle_tensor as numpy_to_paddle class ModelTest: @@ -62,3 +66,13 @@ def eval_pt_model(self, pt_obj: Any, natoms, coords, atype, box) -> Any: box=numpy_to_torch(box), ).items() } + + def eval_pd_model(self, pd_obj: Any, natoms, coords, atype, box) -> Any: + return { + kk: paddle_to_numpy(vv) + for kk, vv in pd_obj( + numpy_to_paddle(coords), + numpy_to_paddle(atype), + box=numpy_to_paddle(box), + ).items() + } diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index c8ff9e4dcf..eec01ca81e 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -14,6 +14,7 @@ ) from ..common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -33,6 +34,12 @@ from deepmd.tf.model.ener import EnerModel as EnergyModelTF else: EnergyModelTF = None +if INSTALLED_PD: + from deepmd.pd.model.model import get_model as get_model_pd + from deepmd.pd.model.model.ener_model import EnergyModel as EnergyModelPD + +else: + EnergyModelPD = None from deepmd.utils.argcheck import ( model_args, ) @@ -85,6 +92,7 @@ def data(self) -> dict: tf_class = EnergyModelTF dp_class = EnergyModelDP pt_class = EnergyModelPT + pd_class = EnergyModelPD args = model_args() def skip_tf(self): @@ -100,6 +108,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_dp(data) elif cls is EnergyModelPT: return get_model_pt(data) + elif cls is EnergyModelPD: + return get_model_pd(data) return cls(**data, **self.addtional_data) def setUp(self): @@ -177,4 +187,6 @@ def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: return (ret["energy"].ravel(), ret["atom_energy"].ravel()) elif backend is self.RefBackend.TF: return (ret[0].ravel(), ret[1].ravel()) + elif backend is self.RefBackend.PD: + return 
(ret["energy"].ravel(), ret["atom_energy"].ravel()) raise ValueError(f"Unknown backend: {backend}") diff --git a/source/tests/consistent/test_activation.py b/source/tests/consistent/test_activation.py index 3fcb9b2fa5..44e887524b 100644 --- a/source/tests/consistent/test_activation.py +++ b/source/tests/consistent/test_activation.py @@ -12,6 +12,7 @@ GLOBAL_SEED, ) from .common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, parameterized, @@ -19,8 +20,8 @@ if INSTALLED_PT: from deepmd.pt.utils.utils import ActivationFn as ActivationFn_pt + from deepmd.pt.utils.utils import to_numpy_array as torch_to_numpy_array from deepmd.pt.utils.utils import ( - to_numpy_array, to_torch_tensor, ) if INSTALLED_TF: @@ -28,6 +29,12 @@ from deepmd.tf.env import ( tf, ) +if INSTALLED_PD: + from deepmd.pd.utils.utils import ActivationFn as ActivationFn_pd + from deepmd.pd.utils.utils import to_numpy_array as paddle_to_numpy_array + from deepmd.pd.utils.utils import ( + to_paddle_tensor, + ) @parameterized( @@ -53,7 +60,15 @@ def test_tf_consistent_with_ref(self): @unittest.skipUnless(INSTALLED_PT, "PyTorch is not installed") def test_pt_consistent_with_ref(self): if INSTALLED_PT: - test = to_numpy_array( + test = torch_to_numpy_array( ActivationFn_pt(self.activation)(to_torch_tensor(self.random_input)) ) np.testing.assert_allclose(self.ref, test, atol=1e-10) + + @unittest.skipUnless(INSTALLED_PD, "Paddle is not installed") + def test_pd_consistent_with_ref(self): + if INSTALLED_PD: + test = paddle_to_numpy_array( + ActivationFn_pd(self.activation)(to_paddle_tensor(self.random_input)) + ) + np.testing.assert_allclose(self.ref, test, atol=1e-10) diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index 6583dddb5f..8c61654f84 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -13,6 +13,7 @@ ) from .common import ( + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, 
CommonTest, @@ -30,6 +31,13 @@ from deepmd.tf.utils.type_embed import TypeEmbedNet as TypeEmbedNetTF else: TypeEmbedNetTF = object +if INSTALLED_PD: + import paddle + + from deepmd.pd.model.network.network import TypeEmbedNetConsistent as TypeEmbedNetPD + from deepmd.pd.utils.env import DEVICE as PD_DEVICE +else: + TypeEmbedNetPD = object @parameterized( @@ -63,6 +71,7 @@ def data(self) -> dict: tf_class = TypeEmbedNetTF dp_class = TypeEmbedNetDP pt_class = TypeEmbedNetPT + pd_class = TypeEmbedNetPD args = type_embedding_args() @property @@ -103,6 +112,12 @@ def eval_pt(self, pt_obj: Any) -> Any: for x in (pt_obj(device=PT_DEVICE),) ] + def eval_pd(self, pd_obj: Any) -> Any: + return [ + x.detach().cpu().numpy() if paddle.is_tensor(x) else x + for x in (pd_obj(device=PD_DEVICE),) + ] + def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]: return (ret[0],) From 2ae45b8520cfa4e59b0ee5225a5e7d564b2a8ac7 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 2 Nov 2024 18:35:56 +0800 Subject: [PATCH 03/58] clean env and training --- deepmd/pd/train/training.py | 14 -------------- deepmd/pd/utils/env.py | 3 +-- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 72ffdf404c..9b5fc1fbb4 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -47,7 +47,6 @@ get_weighted_sampler, ) from deepmd.pd.utils.env import ( - CINN, DEVICE, JIT, NUM_WORKERS, @@ -603,19 +602,6 @@ def warm_up_linear(step, warmup_steps): ) self.optimizer = fleet.distributed_optimizer(self.optimizer) - if CINN: - from paddle import ( - static, - ) - - build_strategy = static.BuildStrategy() - build_strategy.build_cinn_pass = CINN - self.wrapper.forward = paddle.jit.to_static( - self.wrapper.forward, - build_strategy=build_strategy, - full_graph=True, - )(self.wrapper.forward) - # Get model prob for multi-task if self.multi_task: self.model_prob = np.array([0.0 for key in 
self.model_keys]) diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 867deac35b..54aa0cf058 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -33,7 +33,6 @@ paddle.device.set_device(DEVICE) JIT = False -CINN = False CACHE_PER_SYS = 5 # keep at most so many sets per sys in memory ENERGY_BIAS_TRAINABLE = True @@ -102,8 +101,8 @@ def enable_prim(enable: bool = True): "NUM_WORKERS", "DEVICE", "JIT", - "CINN", "CACHE_PER_SYS", "ENERGY_BIAS_TRAINABLE", "LOCAL_RANK", + "enable_prim", ] From 7f03a049780f409d7d41d433957e6dd130b5ae20 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 2 Nov 2024 18:43:35 +0800 Subject: [PATCH 04/58] add more test files --- pyproject.toml | 81 ++- source/tests/pd/__init__.py | 1 + source/tests/pd/common.py | 263 ++++++++ source/tests/pd/conftest.py | 9 + source/tests/pd/model/__init__.py | 1 + source/tests/pd/model/test_autodiff.py | 263 ++++++++ source/tests/pd/model/test_descriptor.py | 195 ++++++ source/tests/pd/model/test_dp_atomic_model.py | 235 +++++++ source/tests/pd/model/test_dp_model.py | 633 ++++++++++++++++++ source/tests/pd/model/test_embedding_net.py | 217 ++++++ source/tests/pd/model/test_ener_fitting.py | 150 +++++ source/tests/pd/model/test_env_mat.py | 187 ++++++ source/tests/pd/model/test_exclusion_mask.py | 70 ++ source/tests/pd/model/test_fitting_net.py | 148 ++++ source/tests/pd/model/test_forward_lower.py | 208 ++++++ source/tests/pd/model/test_get_model.py | 113 ++++ source/tests/pd/model/test_mlp.py | 283 ++++++++ source/tests/pd/model/test_model.py | 424 ++++++++++++ source/tests/pd/model/test_nlist.py | 304 +++++++++ source/tests/pd/model/test_permutation.py | 489 ++++++++++++++ source/tests/pd/model/test_region.py | 98 +++ source/tests/pd/model/test_rot.py | 234 +++++++ source/tests/pd/model/test_rotation.py | 113 ++++ .../tests/pd/model/test_saveload_se_e2_a.py | 138 ++++ source/tests/pd/model/test_se_e2_a.py | 137 ++++ source/tests/pd/model/test_trans.py 
| 168 +++++ .../model/water/data/data_0/set.000/box.npy | Bin 0 -> 3008 bytes .../model/water/data/data_0/set.000/coord.npy | Bin 0 -> 184448 bytes .../water/data/data_0/set.000/energy.npy | Bin 0 -> 448 bytes .../model/water/data/data_0/set.000/force.npy | Bin 0 -> 184448 bytes .../tests/pd/model/water/data/data_0/type.raw | 192 ++++++ .../pd/model/water/data/data_0/type_map.raw | 2 + .../model/water/data/single/set.000/box.npy | Bin 0 -> 164 bytes .../model/water/data/single/set.000/coord.npy | Bin 0 -> 2432 bytes .../water/data/single/set.000/energy.npy | Bin 0 -> 132 bytes .../model/water/data/single/set.000/force.npy | Bin 0 -> 2432 bytes .../tests/pd/model/water/data/single/type.raw | 192 ++++++ .../pd/model/water/data/single/type_map.raw | 2 + source/tests/pd/model/water/se_e2_a.json | 77 +++ source/tests/pd/requirements.txt | 6 + source/tests/pd/test_auto_batch_size.py | 37 + source/tests/pd/test_finetune.py | 380 +++++++++++ source/tests/pd/test_loss.py | 585 ++++++++++++++++ source/tests/pd/test_lr.py | 106 +++ source/tests/pd/test_neighbor_stat.py | 69 ++ source/tests/pd/test_sampler.py | 114 ++++ source/tests/pd/test_update_sel.py | 194 ++++++ source/tests/pd/test_utils.py | 35 + source/tests/pd/water | 1 + 49 files changed, 7136 insertions(+), 18 deletions(-) create mode 100644 source/tests/pd/__init__.py create mode 100644 source/tests/pd/common.py create mode 100644 source/tests/pd/conftest.py create mode 100644 source/tests/pd/model/__init__.py create mode 100644 source/tests/pd/model/test_autodiff.py create mode 100644 source/tests/pd/model/test_descriptor.py create mode 100644 source/tests/pd/model/test_dp_atomic_model.py create mode 100644 source/tests/pd/model/test_dp_model.py create mode 100644 source/tests/pd/model/test_embedding_net.py create mode 100644 source/tests/pd/model/test_ener_fitting.py create mode 100644 source/tests/pd/model/test_env_mat.py create mode 100644 source/tests/pd/model/test_exclusion_mask.py create mode 100644 
source/tests/pd/model/test_fitting_net.py create mode 100644 source/tests/pd/model/test_forward_lower.py create mode 100644 source/tests/pd/model/test_get_model.py create mode 100644 source/tests/pd/model/test_mlp.py create mode 100644 source/tests/pd/model/test_model.py create mode 100644 source/tests/pd/model/test_nlist.py create mode 100644 source/tests/pd/model/test_permutation.py create mode 100644 source/tests/pd/model/test_region.py create mode 100644 source/tests/pd/model/test_rot.py create mode 100644 source/tests/pd/model/test_rotation.py create mode 100644 source/tests/pd/model/test_saveload_se_e2_a.py create mode 100644 source/tests/pd/model/test_se_e2_a.py create mode 100644 source/tests/pd/model/test_trans.py create mode 100644 source/tests/pd/model/water/data/data_0/set.000/box.npy create mode 100644 source/tests/pd/model/water/data/data_0/set.000/coord.npy create mode 100644 source/tests/pd/model/water/data/data_0/set.000/energy.npy create mode 100644 source/tests/pd/model/water/data/data_0/set.000/force.npy create mode 100644 source/tests/pd/model/water/data/data_0/type.raw create mode 100644 source/tests/pd/model/water/data/data_0/type_map.raw create mode 100644 source/tests/pd/model/water/data/single/set.000/box.npy create mode 100644 source/tests/pd/model/water/data/single/set.000/coord.npy create mode 100644 source/tests/pd/model/water/data/single/set.000/energy.npy create mode 100644 source/tests/pd/model/water/data/single/set.000/force.npy create mode 100644 source/tests/pd/model/water/data/single/type.raw create mode 100644 source/tests/pd/model/water/data/single/type_map.raw create mode 100644 source/tests/pd/model/water/se_e2_a.json create mode 100644 source/tests/pd/requirements.txt create mode 100644 source/tests/pd/test_auto_batch_size.py create mode 100644 source/tests/pd/test_finetune.py create mode 100644 source/tests/pd/test_loss.py create mode 100644 source/tests/pd/test_lr.py create mode 100644 
source/tests/pd/test_neighbor_stat.py create mode 100644 source/tests/pd/test_sampler.py create mode 100644 source/tests/pd/test_update_sel.py create mode 100644 source/tests/pd/test_utils.py create mode 120000 source/tests/pd/water diff --git a/pyproject.toml b/pyproject.toml index f181b616a3..15036d155c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,10 @@ classifiers = [ "Programming Language :: Python :: 3 :: Only", "Environment :: GPU :: NVIDIA CUDA :: 12 :: 12.2", "Intended Audience :: Science/Research", - "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Physics", @@ -44,14 +47,14 @@ dependencies = [ 'typing_extensions; python_version < "3.8"', 'importlib_metadata>=1.4; python_version < "3.8"', 'h5py', - "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", + "h5py>=3.6.0,!=3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", 'wcmatch', 'packaging', 'ml_dtypes', 'mendeleev', 'array-api-compat', ] -requires-python = ">=3.8" +requires-python = ">=3.9" keywords = ["deepmd"] [project.entry-points."lammps.plugins"] @@ -86,7 +89,7 @@ test = [ ] docs = [ "sphinx>=3.1.1", - "sphinx_rtd_theme>=1.0.0rc1", + "sphinx-book-theme", "myst-nb>=1.0.0rc0", "myst-parser>=0.19.2", "sphinx-design", @@ -94,7 +97,7 @@ docs = [ "exhale>=0.3.7", "numpydoc", "ase", - "deepmodeling-sphinx>=0.1.0", + "deepmodeling-sphinx>=0.3.0", "dargs>=0.3.4", "sphinx-argparse<0.5.0", "pygments-lammps", @@ -102,9 +105,10 @@ docs = [ "sphinx-autoapi>=3.0.0", "sphinxcontrib-programoutput", "sphinxcontrib-moderncmakedomain", + "sphinx-remove-toctrees", ] lmp = [ - "lammps~=2024.8.29.0.0", + "lammps~=2024.8.29.1.0", 
] ipi = [ "ipi", @@ -129,13 +133,32 @@ cu12 = [ "nvidia-curand-cu12", "nvidia-cusolver-cu12", "nvidia-cusparse-cu12", - "nvidia-cudnn-cu12<9", + "nvidia-cudnn-cu12", "nvidia-cuda-nvcc-cu12", ] +jax = [ + # below is a funny workaround for + # https://github.com/astral-sh/uv/issues/8601 + 'jax>=0.4.33;python_version>="3.10"', + 'jax>=0.4.33;python_version>="3.10"', + 'flax>=0.10.0;python_version>="3.10"', + 'flax>=0.10.0;python_version>="3.10"', + 'orbax-checkpoint;python_version>="3.10"', + 'orbax-checkpoint;python_version>="3.10"', + # The pinning of ml_dtypes may conflict with TF + # 'jax-ai-stack;python_version>="3.10"', +] [tool.deepmd_build_backend.scripts] dp = "deepmd.main:main" +[dependency-groups] +dev = [ + "pre-commit", + "cmake", + "mpich", +] + [tool.setuptools_scm] [tool.scikit-build] @@ -201,12 +224,12 @@ replacement = '\1="https://github.com/deepmodeling/deepmd-kit/raw/master/\g<2>"' [tool.cibuildwheel] test-command = [ "python -m deepmd -h", - """python -c "import deepmd.tf;import deepmd.pt" """, + """python -c "import deepmd.tf;import deepmd.pt;import deepmd.pd" """, "dp -h", "dp_ipi", "pytest {project}/source/tests/tf/test_lammps.py" ] -test-extras = ["cpu", "test", "lmp", "ipi", "torch"] +test-extras = ["cpu", "test", "lmp", "ipi", "torch", "paddle"] build = ["cp311-*"] skip = ["*-win32", "*-manylinux_i686", "*-musllinux*"] # TODO: uncomment to use the latest image when CUDA 11 is deprecated @@ -222,9 +245,10 @@ repair-wheel-command = """delocate-wheel --require-archs {delocate_archs} -w {de [tool.cibuildwheel.macos.environment] PIP_PREFER_BINARY = "1" -DP_LAMMPS_VERSION = "stable_29Aug2024" +DP_LAMMPS_VERSION = "stable_29Aug2024_update1" DP_ENABLE_IPI = "1" DP_ENABLE_PYTORCH = "1" +DP_ENABLE_PADDLE = "1" # for unclear reason, when enabling PyTorch, OpenMP is found accidentally CMAKE_ARGS = "-DCMAKE_DISABLE_FIND_PACKAGE_OpenMP=1" @@ -258,9 +282,10 @@ before-build = [ ] [tool.cibuildwheel.linux.environment] PIP_PREFER_BINARY = "1" 
-DP_LAMMPS_VERSION = "stable_29Aug2024" +DP_LAMMPS_VERSION = "stable_29Aug2024_update1" DP_ENABLE_IPI = "1" DP_ENABLE_PYTORCH = "1" +DP_ENABLE_PADDLE = "1" MPI_HOME = "/usr/lib64/mpich" PATH = "/usr/lib64/mpich/bin:$PATH" # use CPU version of torch for building, which should also work for GPU @@ -269,11 +294,9 @@ PATH = "/usr/lib64/mpich/bin:$PATH" UV_EXTRA_INDEX_URL = "https://download.pytorch.org/whl/cpu" # trick to find the correction version of mpich CMAKE_PREFIX_PATH="/opt/python/cp311-cp311/" -# PT 2.4.0 requires cudnn 9, incompatible with TF with cudnn 8 -PYTORCH_VERSION = "2.3.1" [tool.cibuildwheel.windows] -test-extras = ["cpu", "torch"] +test-extras = ["cpu", "torch", "paddle"] test-command = [ "python -m deepmd -h", "dp -h", @@ -281,6 +304,7 @@ test-command = [ [tool.cibuildwheel.windows.environment] PIP_PREFER_BINARY = "1" DP_ENABLE_PYTORCH = "1" +DP_ENABLE_PADDLE = "1" # One can run `tox` or `tox -e gpu` # to run pytest in an isolated environment @@ -311,14 +335,14 @@ legacy_tox_ini = """ # be silenced # W504 - line break after binary operator - there is conflict between W503 and W504 in -# some lintners. One recomends line bread after and one before binary operator so we -# swith W504 off and recomend this coding style: +# some lintners. 
One recommends line bread after and one before binary operator so we +# switch W504 off and recommend this coding style: # a = (b + -> instead of -> a = (b # c) + c) [tool.autopep8] ignore = "W504" -# D413 - Missing blank line after last section - makes no sense only adds empy lines in +# D413 - Missing blank line after last section - makes no sense only adds empty lines in # docstrings # D416 - Section name should end with a colon - only applicable to RST type docstrings, # we are using numpy style @@ -384,8 +408,10 @@ convention = "numpy" banned-module-level-imports = [ "deepmd.tf", "deepmd.pt", + "deepmd.pd", "tensorflow", "torch", + "paddle", ] [tool.ruff.lint.flake8-tidy-imports.banned-api] @@ -395,9 +421,13 @@ banned-module-level-imports = [ # Also ignore `E402` in all `__init__.py` files. "deepmd/tf/**" = ["TID253"] "deepmd/pt/**" = ["TID253"] +"deepmd/jax/**" = ["TID253"] +"deepmd/pd/**" = ["TID253"] "source/tests/tf/**" = ["TID253"] "source/tests/pt/**" = ["TID253"] +"source/tests/pd/**" = ["TID253"] "source/tests/universal/pt/**" = ["TID253"] +"source/tests/universal/pd/**" = ["TID253"] "source/ipi/tests/**" = ["TID253"] "source/lmp/tests/**" = ["TID253"] "**/*.ipynb" = ["T20"] # printing in a nb file is expected @@ -411,4 +441,19 @@ plugins = ["source.3rdparty.coverage_plugins.jit_plugin"] [tool.pylint.'MESSAGES CONTROL'] load-plugins = "deepmd_checker" disable = "all" -enable = "EDP01,EDP02" +enable = "E8001,E8002" + +[tool.flake8] +select = [ + "TOR0", + "TOR1", + "TOR2", +] + +[tool.uv.sources] +mpich = { index = "mpi4py" } + +[[tool.uv.index]] +name = "mpi4py" +url = "https://pypi.anaconda.org/mpi4py/simple" +explicit = true diff --git a/source/tests/pd/__init__.py b/source/tests/pd/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/pd/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/pd/common.py b/source/tests/pd/common.py new file mode 100644 index 
0000000000..59a9672330 --- /dev/null +++ b/source/tests/pd/common.py @@ -0,0 +1,263 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.main import ( + main, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_PD_FLOAT_PRECISION, +) + + +def run_dp(cmd: str) -> int: + """Run DP directly from the entry point instead of the subprocess. + + It is quite slow to start DeePMD-kit with subprocess. + + Parameters + ---------- + cmd : str + The command to run. + + Returns + ------- + int + Always returns 0. + """ + cmds = cmd.split() + if cmds[0] == "dp": + cmds = cmds[1:] + else: + raise RuntimeError("The command is not dp") + + main(cmds) + return 0 + + +def eval_model( + model, + coords: Union[np.ndarray, paddle.Tensor], + cells: Optional[Union[np.ndarray, paddle.Tensor]], + atom_types: Union[np.ndarray, paddle.Tensor, list[int]], + spins: Optional[Union[np.ndarray, paddle.Tensor]] = None, + atomic: bool = False, + infer_batch_size: int = 2, + denoise: bool = False, +): + model = model.to(DEVICE) + energy_out = [] + atomic_energy_out = [] + force_out = [] + force_mag_out = [] + virial_out = [] + atomic_virial_out = [] + updated_coord_out = [] + logits_out = [] + err_msg = ( + f"All inputs should be the same format, " + f"but found {type(coords)}, {type(cells)}, {type(atom_types)} instead! 
" + ) + return_tensor = True + if isinstance(coords, paddle.Tensor): + if cells is not None: + assert isinstance(cells, paddle.Tensor), err_msg + if spins is not None: + assert isinstance(spins, paddle.Tensor), err_msg + assert isinstance(atom_types, paddle.Tensor) or isinstance(atom_types, list) + atom_types = paddle.to_tensor(atom_types, dtype=paddle.int32, place=DEVICE) + elif isinstance(coords, np.ndarray): + if cells is not None: + assert isinstance(cells, np.ndarray), err_msg + if spins is not None: + assert isinstance(spins, np.ndarray), err_msg + assert isinstance(atom_types, np.ndarray) or isinstance(atom_types, list) + atom_types = np.array(atom_types, dtype=np.int32) + return_tensor = False + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + if isinstance(atom_types, paddle.Tensor): + atom_types = paddle.tile(atom_types.unsqueeze(0), [nframes, 1]).reshape( + [nframes, -1] + ) + else: + atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) + else: + natoms = len(atom_types[0]) + + coord_input = paddle.to_tensor( + coords.reshape([-1, natoms, 3]), dtype=GLOBAL_PD_FLOAT_PRECISION, place=DEVICE + ) + spin_input = None + if spins is not None: + spin_input = paddle.to_tensor( + spins.reshape([-1, natoms, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + has_spin = getattr(model, "has_spin", False) + if callable(has_spin): + has_spin = has_spin() + type_input = paddle.to_tensor(atom_types, dtype=paddle.int64, place=DEVICE) + box_input = None + if cells is None: + pbc = False + else: + pbc = True + box_input = paddle.to_tensor( + cells.reshape([-1, 3, 3]), dtype=GLOBAL_PD_FLOAT_PRECISION, place=DEVICE + ) + num_iter = int((nframes + infer_batch_size - 1) / infer_batch_size) + + for ii in range(num_iter): + batch_coord = coord_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_atype = type_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_box = None + batch_spin = None + 
if spin_input is not None: + batch_spin = spin_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + if pbc: + batch_box = box_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + input_dict = { + "coord": batch_coord, + "atype": batch_atype, + "box": batch_box, + "do_atomic_virial": atomic, + } + if has_spin: + input_dict["spin"] = batch_spin + batch_output = model(**input_dict) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + if not return_tensor: + if "energy" in batch_output: + energy_out.append(batch_output["energy"].numpy()) + if "atom_energy" in batch_output: + atomic_energy_out.append(batch_output["atom_energy"].numpy()) + if "force" in batch_output: + force_out.append(batch_output["force"].numpy()) + if "force_mag" in batch_output: + force_mag_out.append(batch_output["force_mag"].numpy()) + if "virial" in batch_output: + virial_out.append(batch_output["virial"].numpy()) + if "atom_virial" in batch_output: + atomic_virial_out.append(batch_output["atom_virial"].numpy()) + if "updated_coord" in batch_output: + updated_coord_out.append(batch_output["updated_coord"].numpy()) + if "logits" in batch_output: + logits_out.append(batch_output["logits"].numpy()) + else: + if "energy" in batch_output: + energy_out.append(batch_output["energy"]) + if "atom_energy" in batch_output: + atomic_energy_out.append(batch_output["atom_energy"]) + if "force" in batch_output: + force_out.append(batch_output["force"]) + if "force_mag" in batch_output: + force_mag_out.append(batch_output["force_mag"]) + if "virial" in batch_output: + virial_out.append(batch_output["virial"]) + if "atom_virial" in batch_output: + atomic_virial_out.append(batch_output["atom_virial"]) + if "updated_coord" in batch_output: + updated_coord_out.append(batch_output["updated_coord"]) + if "logits" in batch_output: + logits_out.append(batch_output["logits"]) + if not return_tensor: + energy_out = ( + np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1]) 
# pylint: disable=no-explicit-dtype + ) + atomic_energy_out = ( + np.concatenate(atomic_energy_out) + if atomic_energy_out + else np.zeros([nframes, natoms, 1]) # pylint: disable=no-explicit-dtype + ) + force_out = ( + np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype + ) + force_mag_out = ( + np.concatenate(force_mag_out) + if force_mag_out + else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype + ) + virial_out = ( + np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3]) # pylint: disable=no-explicit-dtype + ) + atomic_virial_out = ( + np.concatenate(atomic_virial_out) + if atomic_virial_out + else np.zeros([nframes, natoms, 3, 3]) # pylint: disable=no-explicit-dtype + ) + updated_coord_out = ( + np.concatenate(updated_coord_out) if updated_coord_out else None + ) + logits_out = np.concatenate(logits_out) if logits_out else None + else: + energy_out = ( + paddle.concat(energy_out) + if energy_out + else paddle.zeros([nframes, 1], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + atomic_energy_out = ( + paddle.concat(atomic_energy_out) + if atomic_energy_out + else paddle.zeros([nframes, natoms, 1], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + force_out = ( + paddle.concat(force_out) + if force_out + else paddle.zeros([nframes, natoms, 3], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + force_mag_out = ( + paddle.concat(force_mag_out) + if force_mag_out + else paddle.zeros([nframes, natoms, 3], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + virial_out = ( + paddle.concat(virial_out) + if virial_out + else paddle.zeros([nframes, 3, 3], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + atomic_virial_out = ( + paddle.concat(atomic_virial_out) + if atomic_virial_out + else paddle.zeros( + [nframes, natoms, 3, 3], dtype=GLOBAL_PD_FLOAT_PRECISION + ).to(device=DEVICE) + ) + updated_coord_out = ( + 
paddle.concat(updated_coord_out) if updated_coord_out else None + ) + logits_out = paddle.concat(logits_out) if logits_out else None + if denoise: + return updated_coord_out, logits_out + else: + results_dict = { + "energy": energy_out, + "force": force_out, + "virial": virial_out, + } + if has_spin: + results_dict["force_mag"] = force_mag_out + if atomic: + results_dict["atom_energy"] = atomic_energy_out + results_dict["atom_virial"] = atomic_virial_out + return results_dict diff --git a/source/tests/pd/conftest.py b/source/tests/pd/conftest.py new file mode 100644 index 0000000000..530cb18907 --- /dev/null +++ b/source/tests/pd/conftest.py @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import paddle +import pytest + + +@pytest.fixture(scope="package", autouse=True) +def clear_cuda_memory(request): + yield + paddle.device.cuda.empty_cache() diff --git a/source/tests/pd/model/__init__.py b/source/tests/pd/model/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/pd/model/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/pd/model/test_autodiff.py b/source/tests/pd/model/test_autodiff.py new file mode 100644 index 0000000000..a056491fb3 --- /dev/null +++ b/source/tests/pd/model/test_autodiff.py @@ -0,0 +1,263 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) + +from ...seed import ( + GLOBAL_SEED, +) + +dtype = paddle.float64 + +from ..common import ( + eval_model, +) +from .test_permutation import ( + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, + model_spin, + model_zbl, +) + + +# from deepmd-kit repo +def finite_difference(f, x, delta=1e-6): + in_shape = x.shape + y0 = f(x) + out_shape = y0.shape + res = np.empty(out_shape + 
in_shape) + for idx in np.ndindex(*in_shape): + diff = np.zeros(in_shape) + diff[idx] += delta + y1p = f(x + diff) + y1n = f(x - diff) + res[(Ellipsis, *idx)] = (y1p - y1n) / (2 * delta) + return res + + +def stretch_box(old_coord, old_box, new_box): + ocoord = old_coord.reshape(-1, 3) + obox = old_box.reshape(3, 3) + nbox = new_box.reshape(3, 3) + ncoord = ocoord @ np.linalg.inv(obox) @ nbox + return ncoord.reshape(old_coord.shape) + + +class ForceTest: + def test( + self, + ): + env.enable_prim(True) + places = 5 + delta = 1e-5 + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device="cpu") + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device="cpu") + coord = paddle.rand([natoms, 3], dtype=dtype).to(device="cpu") + coord = paddle.matmul(coord, cell) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device="cpu") + atype = paddle.to_tensor([0, 0, 0, 1, 1]) + # assumes input to be numpy tensor + coord = coord.numpy() + spin = spin.numpy() + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + + def np_infer_coord( + coord, + ): + result = eval_model( + self.model, + paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + ) + # detach + ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} + return ret + + def np_infer_spin( + spin, + ): + result = eval_model( + self.model, + paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + ) + # detach + ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} + return ret + + def ff_coord(_coord): + return np_infer_coord(_coord)["energy"] + + def ff_spin(_spin): + return np_infer_spin(_spin)["energy"] + + if not 
test_spin: + fdf = -finite_difference(ff_coord, coord, delta=delta).squeeze() + rff = np_infer_coord(coord)["force"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + else: + # real force + fdf = -finite_difference(ff_coord, coord, delta=delta).squeeze() + rff = np_infer_coord(coord)["force"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + # magnetic force + fdf = -finite_difference(ff_spin, spin, delta=delta).squeeze() + rff = np_infer_spin(spin)["force_mag"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + + +class VirialTest: + def test( + self, + ): + places = 5 + delta = 1e-4 + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device="cpu") + cell = (cell) + 5.0 * paddle.eye(3).to(device="cpu") + coord = paddle.rand([natoms, 3], dtype=dtype).to(device="cpu") + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]) + # assumes input to be numpy tensor + coord = coord.numpy() + cell = cell.numpy() + test_keys = ["energy", "force", "virial"] + + def np_infer( + new_cell, + ): + result = eval_model( + self.model, + paddle.to_tensor(stretch_box(coord, cell, new_cell)) + .to(device="cpu") + .unsqueeze(0), + paddle.to_tensor(new_cell).to(device="cpu").unsqueeze(0), + atype, + ) + # detach + ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} + # detach + return ret + + def ff(bb): + return np_infer(bb)["energy"] + + fdv = ( + -(finite_difference(ff, cell, delta=delta).transpose([0, 2, 1]) @ cell) + .squeeze() + .reshape([9]) + ) + rfv = np_infer(cell)["virial"] + np.testing.assert_almost_equal(fdv, rfv, decimal=places) + + +class TestEnergyModelSeAForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +class TestEnergyModelSeAVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = 
copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2Force(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPAUniVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybridForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybridVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBLForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class 
TestEnergyModelZBLVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeAForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_descriptor.py b/source/tests/pd/model/test_descriptor.py new file mode 100644 index 0000000000..386c68595b --- /dev/null +++ b/source/tests/pd/model/test_descriptor.py @@ -0,0 +1,195 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +import json +from pathlib import ( + Path, +) + +from deepmd.pd.model.descriptor import ( + prod_env_mat, +) +from deepmd.pd.utils import ( + decomp, + dp_random, + env, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_NP_FLOAT_PRECISION, + GLOBAL_PD_FLOAT_PRECISION, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.env import ( + op_module, +) + +from ..test_finetune import ( + energy_data_requirement, +) +from .test_embedding_net import ( + get_single_batch, +) + +CUR_DIR = os.path.dirname(__file__) + + +def base_se_a(rcut, rcut_smth, sel, batch, mean, stddev): + g = tf.Graph() + with g.as_default(): + coord = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + box = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + atype = tf.placeholder(tf.int32, [None, None]) + natoms_vec = tf.placeholder(tf.int32, [None]) + default_mesh = tf.placeholder(tf.int32, [None]) + stat_descrpt, descrpt_deriv, rij, nlist = 
op_module.prod_env_mat_a( + coord, + atype, + natoms_vec, + box, + default_mesh, + tf.constant(mean), + tf.constant(stddev), + rcut_a=-1.0, + rcut_r=rcut, + rcut_r_smth=rcut_smth, + sel_a=sel, + sel_r=[0 for i in sel], + ) + + net_deriv_reshape = tf.ones_like(stat_descrpt) + force = op_module.prod_force_se_a( + net_deriv_reshape, + descrpt_deriv, + nlist, + natoms_vec, + n_a_sel=sum(sel), + n_r_sel=0, + ) + + with tf.Session(graph=g) as sess: + y = sess.run( + [stat_descrpt, force, nlist], + feed_dict={ + coord: batch["coord"], + box: batch["box"], + natoms_vec: batch["natoms"], + atype: batch["atype"], + default_mesh: np.array([0, 0, 0, 2, 2, 2]), + }, + ) + tf.reset_default_graph() + return y + + +class TestSeA(unittest.TestCase): + def setUp(self): + dp_random.seed(20) + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.bsz = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + ds = DeepmdDataSetForLoader( + self.systems[0], + model_config["type_map"], + ) + ds.add_data_requirement(energy_data_requirement) + self.np_batch, self.pt_batch = get_single_batch(ds) + self.sec = np.cumsum(self.sel) + self.ntypes = len(self.sel) + self.nnei = sum(self.sel) + + def test_consistency(self): + avg_zero = paddle.zeros( + [self.ntypes, self.nnei * 4], + dtype=GLOBAL_PD_FLOAT_PRECISION, + ).to(device=env.DEVICE) + std_ones = paddle.ones( + [self.ntypes, self.nnei * 4], + 
dtype=GLOBAL_PD_FLOAT_PRECISION, + ).to(device=env.DEVICE) + base_d, base_force, base_nlist = base_se_a( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + batch=self.np_batch, + mean=avg_zero.detach().cpu(), + stddev=std_ones.detach().cpu(), + ) + + pt_coord = self.pt_batch["coord"].to(env.DEVICE) + atype = self.pt_batch["atype"].to(env.DEVICE) + pt_coord.stop_gradient = False + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + pt_coord, + self.pt_batch["atype"].to(env.DEVICE), + self.rcut, + self.sel, + mixed_types=False, + box=self.pt_batch["box"].to(env.DEVICE), + ) + my_d, _, _ = prod_env_mat( + extended_coord, + nlist, + atype, + avg_zero.reshape([-1, self.nnei, 4]).to(DEVICE), + std_ones.reshape([-1, self.nnei, 4]).to(DEVICE), + self.rcut, + self.rcut_smth, + ) + my_d.sum().backward() + bsz = pt_coord.shape[0] + my_force = pt_coord.grad.reshape([bsz, -1, 3]).cpu().detach().numpy() + base_force = base_force.reshape(bsz, -1, 3) + base_d = base_d.reshape(bsz, -1, self.nnei, 4) + my_d = my_d.reshape([bsz, -1, self.nnei, 4]).cpu().detach().numpy() + base_nlist = base_nlist.reshape(bsz, -1, self.nnei) + + mapping = mapping.cpu() + my_nlist = nlist.reshape([bsz, -1]).cpu() + mask = my_nlist == -1 + my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) + my_nlist = decomp.take_along_axis(mapping, axis=-1, indices=my_nlist) + my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) - mask.astype( + my_nlist.dtype + ) + my_nlist = my_nlist.cpu().reshape([bsz, -1, self.nnei]).numpy() + self.assertTrue(np.allclose(base_nlist, my_nlist)) + self.assertTrue(np.allclose(np.mean(base_d, axis=2), np.mean(my_d, axis=2))) + self.assertTrue(np.allclose(np.std(base_d, axis=2), np.std(my_d, axis=2))) + # descriptors may be different when there are multiple neighbors in the same distance + self.assertTrue(np.allclose(base_force, -my_force)) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/source/tests/pd/model/test_dp_atomic_model.py b/source/tests/pd/model/test_dp_atomic_model.py new file mode 100644 index 0000000000..785bfa1076 --- /dev/null +++ b/source/tests/pd/model/test_dp_atomic_model.py @@ -0,0 +1,235 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.atomic_model import DPAtomicModel as DPDPAtomicModel +from deepmd.dpmodel.descriptor import DescrptSeA as DPDescrptSeA +from deepmd.dpmodel.fitting import InvarFitting as DPInvarFitting +from deepmd.pd.model.atomic_model import ( + DPAtomicModel, +) +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.model.task.ener import ( + InvarFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, + TestCaseSingleFrameWithNlistWithVirtual, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDPAtomicModel(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + + # test the case of exclusion + for atom_excl, pair_excl in itertools.product([[], [1]], [[], [[0, 1]]]): + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + md0.reinit_atom_exclude(atom_excl) + md0.reinit_pair_exclude(pair_excl) + md1 = DPAtomicModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) + for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_atomic(*args) + ret1 = md1.forward_common_atomic(*args) + np.testing.assert_allclose( + 
to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + ) + + def test_dp_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPInvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ) + type_map = ["foo", "bar"] + md0 = DPDPAtomicModel(ds, ft, type_map=type_map) + md1 = DPAtomicModel.deserialize(md0.serialize()).to(env.DEVICE) + args0 = [self.coord_ext, self.atype_ext, self.nlist] + args1 = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_atomic(*args0) + ret1 = md1.forward_common_atomic(*args1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + ) + + def test_jit(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel(ds, ft, type_map=type_map).to(env.DEVICE) + md0 = paddle.jit.to_static(md0) + self.assertEqual(md0.get_rcut(), self.rcut) + self.assertEqual(md0.get_type_map(), type_map) + + def test_excl_consistency(self): + type_map = ["foo", "bar"] + + # test the case of exclusion + for atom_excl, pair_excl in itertools.product([[], [1]], [[], [[0, 1]]]): + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + md1 = DPAtomicModel.deserialize(md0.serialize()).to(env.DEVICE) + + md0.reinit_atom_exclude(atom_excl) + md0.reinit_pair_exclude(pair_excl) + # hacking! 
+ md1.descriptor.reinit_exclude(pair_excl) + md1.fitting_net.reinit_exclude(atom_excl) + + # check energy consistency + args = [ + to_paddle_tensor(ii) + for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_atomic(*args) + ret1 = md1.forward_common_atomic(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + ) + + # check output def + out_names = [vv.name for vv in md0.atomic_output_def().get_data().values()] + self.assertEqual(out_names, ["energy", "mask"]) + if atom_excl != []: + for ii in md0.atomic_output_def().get_data().values(): + if ii.name == "mask": + self.assertEqual(ii.shape, [1]) + self.assertFalse(ii.reducible) + self.assertFalse(ii.r_differentiable) + self.assertFalse(ii.c_differentiable) + + # check mask + if atom_excl == []: + pass + elif atom_excl == [1]: + self.assertIn("mask", ret0.keys()) + expected = np.array([1, 1, 0], dtype="int64") + expected = np.concatenate( + [expected, expected[self.perm[: self.nloc]]] + ).reshape(2, 3) + np.testing.assert_array_equal(to_numpy_array(ret0["mask"]), expected) + else: + raise ValueError(f"not expected atom_excl {atom_excl}") + + +class TestDPAtomicModelVirtualConsistency(unittest.TestCase): + def setUp(self): + self.case0 = TestCaseSingleFrameWithNlist() + self.case1 = TestCaseSingleFrameWithNlistWithVirtual() + self.case0.setUp() + self.case1.setUp() + + def test_virtual_consistency(self): + nf, _, _ = self.case0.nlist.shape + ds = DescrptSeA( + self.case0.rcut, + self.case0.rcut_smth, + self.case0.sel, + ) + ft = InvarFitting( + "energy", + self.case0.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ) + type_map = ["foo", "bar"] + md1 = DPAtomicModel(ds, ft, type_map=type_map).to(env.DEVICE) + + args0 = [self.case0.coord_ext, self.case0.atype_ext, self.case0.nlist] + args0 = [to_paddle_tensor(ii) for ii in args0] + args1 = [self.case1.coord_ext, self.case1.atype_ext, self.case1.nlist] + args1 = 
[to_paddle_tensor(ii) for ii in args1] + + ret0 = md1.forward_common_atomic(*args0) + ret1 = md1.forward_common_atomic(*args1) + + for dd in range(self.case0.nf): + np.testing.assert_allclose( + to_numpy_array(ret0["energy"])[dd], + to_numpy_array(ret1["energy"])[dd, self.case1.get_real_mapping[dd], :], + ) + expected_mask = np.array( + [ + [1, 0, 1, 1], + [1, 1, 0, 1], + ] + ) + np.testing.assert_equal(to_numpy_array(ret1["mask"]), expected_mask) diff --git a/source/tests/pd/model/test_dp_model.py b/source/tests/pd/model/test_dp_model.py new file mode 100644 index 0000000000..a281851f14 --- /dev/null +++ b/source/tests/pd/model/test_dp_model.py @@ -0,0 +1,633 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor import DescrptSeA as DPDescrptSeA +from deepmd.dpmodel.fitting import EnergyFittingNet as DPEnergyFittingNet +from deepmd.dpmodel.model.ener_model import EnergyModel as DPEnergyModel +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.model.model import ( + EnergyModel, +) +from deepmd.pd.model.task.ener import ( + EnergyFittingNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + build_neighbor_list, + extend_coord_with_ghosts, + extend_input_and_build_neighbor_list, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, + TestCaseSingleFrameWithoutNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDPModel(unittest.TestCase, TestCaseSingleFrameWithoutNlist): + def setUp(self): + TestCaseSingleFrameWithoutNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc = self.atype.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + 
).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + ret0 = md0.forward_common(*args) + ret1 = md1.forward_common(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_redu"]), + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_r"]), + to_numpy_array(ret1["energy_derv_r"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c_redu"]), + to_numpy_array(ret1["energy_derv_c_redu"]), + atol=self.atol, + ) + ret0 = md0.forward_common(*args, do_atomic_virial=True) + ret1 = md1.forward_common(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c"]), + to_numpy_array(ret1["energy_derv_c"]), + atol=self.atol, + ) + + coord_ext, atype_ext, mapping = extend_coord_with_ghosts( + to_paddle_tensor(self.coord), + to_paddle_tensor(self.atype), + to_paddle_tensor(self.cell), + self.rcut, + ) + nlist = build_neighbor_list( + coord_ext, + atype_ext, + self.nloc, + self.rcut, + self.sel, + distinguish_types=(not md0.mixed_types()), + ) + args = [coord_ext, atype_ext, nlist] + ret2 = md0.forward_common_lower(*args, do_atomic_virial=True) + # check the consistency between the reduced virial from + # forward_common and forward_common_lower + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c_redu"]), + to_numpy_array(ret2["energy_derv_c_redu"]), + atol=self.atol, + ) + + def test_dp_consistency(self): + nf, nloc = self.atype.shape + nfp, nap = 2, 3 + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + 
mixed_types=ds.mixed_types(), + numb_fparam=nfp, + numb_aparam=nap, + ) + type_map = ["foo", "bar"] + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + rng = np.random.default_rng(GLOBAL_SEED) + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, nloc, nap]) + args0 = [self.coord, self.atype, self.cell] + args1 = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + kwargs0 = {"fparam": fparam, "aparam": aparam} + kwargs1 = {kk: to_paddle_tensor(vv) for kk, vv in kwargs0.items()} + ret0 = md0.call(*args0, **kwargs0) + ret1 = md1.forward_common(*args1, **kwargs1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + ret0["energy_redu"], + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + + def test_dp_consistency_nopbc(self): + nf, nloc = self.atype.shape + nfp, nap = 2, 3 + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + numb_fparam=nfp, + numb_aparam=nap, + ) + type_map = ["foo", "bar"] + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + rng = np.random.default_rng(GLOBAL_SEED) + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, self.nloc, nap]) + args0 = [self.coord, self.atype] + args1 = [to_paddle_tensor(ii) for ii in args0] + kwargs0 = {"fparam": fparam, "aparam": aparam} + kwargs1 = {kk: to_paddle_tensor(vv) for kk, vv in kwargs0.items()} + ret0 = md0.call(*args0, **kwargs0) + ret1 = md1.forward_common(*args1, **kwargs1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + ret0["energy_redu"], + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + + def 
test_prec_consistency(self): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc = self.atype.shape + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ) + nfp, nap = 2, 3 + type_map = ["foo", "bar"] + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, nloc, nap]) + + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + args64 = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + args64[0] = args64[0].to(paddle.float64) + args64[2] = args64[2].to(paddle.float64) + args32 = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + args32[0] = args32[0].to(paddle.float32) + args32[2] = args32[2].to(paddle.float32) + # fparam, aparam are converted to coordinate precision by model + fparam = to_paddle_tensor(fparam) + aparam = to_paddle_tensor(aparam) + + model_l_ret_64 = md1.forward_common(*args64, fparam=fparam, aparam=aparam) + model_l_ret_32 = md1.forward_common(*args32, fparam=fparam, aparam=aparam) + + for ii in model_l_ret_32.keys(): + if ii[-4:] == "redu": + self.assertEqual(model_l_ret_32[ii].dtype, paddle.float64) + else: + self.assertEqual(model_l_ret_32[ii].dtype, paddle.float32) + if ii != "mask": + self.assertEqual(model_l_ret_64[ii].dtype, paddle.float64) + else: + self.assertEqual(model_l_ret_64[ii].dtype, paddle.int32) + np.testing.assert_allclose( + to_numpy_array(model_l_ret_32[ii]), + to_numpy_array(model_l_ret_64[ii]), + atol=self.atol, + ) + + +class TestDPModelLower(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + 
).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_lower(*args) + ret1 = md1.forward_common_lower(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_redu"]), + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_r"]), + to_numpy_array(ret1["energy_derv_r"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c_redu"]), + to_numpy_array(ret1["energy_derv_c_redu"]), + atol=self.atol, + ) + ret0 = md0.forward_common_lower(*args, do_atomic_virial=True) + ret1 = md1.forward_common_lower(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c"]), + to_numpy_array(ret1["energy_derv_c"]), + atol=self.atol, + ) + + def test_dp_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ) + type_map = ["foo", "bar"] + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args0 = [self.coord_ext, self.atype_ext, self.nlist] + args1 = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.call_lower(*args0) + ret1 = md1.forward_common_lower(*args1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + ret0["energy_redu"], + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + + def test_prec_consistency(self): + rng = 
np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ) + nfp, nap = 2, 3 + type_map = ["foo", "bar"] + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, nloc, nap]) + + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + args64 = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + args64[0] = args64[0].to(paddle.float64) + args32 = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + args32[0] = args32[0].to(paddle.float32) + # fparam, aparam are converted to coordinate precision by model + fparam = to_paddle_tensor(fparam) + aparam = to_paddle_tensor(aparam) + + model_l_ret_64 = md1.forward_common_lower(*args64, fparam=fparam, aparam=aparam) + model_l_ret_32 = md1.forward_common_lower(*args32, fparam=fparam, aparam=aparam) + + for ii in model_l_ret_32.keys(): + if ii[-4:] == "redu": + self.assertEqual(model_l_ret_32[ii].dtype, paddle.float64) + else: + self.assertEqual(model_l_ret_32[ii].dtype, paddle.float32) + if ii != "mask": + self.assertEqual(model_l_ret_64[ii].dtype, paddle.float64) + else: + self.assertEqual(model_l_ret_64[ii].dtype, paddle.int32) + np.testing.assert_allclose( + to_numpy_array(model_l_ret_32[ii]), + to_numpy_array(model_l_ret_64[ii]), + atol=self.atol, + ) + + def test_jit(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md0 = paddle.jit.to_static(md0) + md0.get_rcut() + md0.get_type_map() + + +class TestDPModelFormatNlist(unittest.TestCase): 
+ def setUp(self): + # nloc == 3, nall == 4 + self.nloc = 3 + self.nall = 5 + self.nf, self.nt = 1, 2 + self.coord_ext = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, -2, 0], + [2.3, 0, 0], + ], + dtype=np.float64, + ).reshape([1, self.nall * 3]) + # sel = [5, 2] + self.sel = [5, 2] + self.expected_nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, -1, -1], + ], + dtype="int64", + ).reshape([1, self.nloc, sum(self.sel)]) + self.atype_ext = np.array([0, 0, 1, 0, 1], dtype="int64").reshape( + [1, self.nall] + ) + self.rcut_smth = 0.4 + self.rcut = 2.0 + + nf, nloc, nnei = self.expected_nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + self.md = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + + def test_nlist_eq(self): + # n_nnei == nnei + nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, -1, -1], + ], + dtype=np.int64, + ).reshape([1, self.nloc, -1]) + nlist1 = self.md.format_nlist( + to_paddle_tensor(self.coord_ext), + to_paddle_tensor(self.atype_ext), + to_paddle_tensor(nlist), + ) + np.testing.assert_equal(self.expected_nlist, to_numpy_array(nlist1)) + + def test_nlist_st(self): + # n_nnei < nnei + nlist = np.array( + [ + [1, 3, -1, 2], + [0, -1, -1, 2], + [0, 1, -1, -1], + ], + dtype=np.int64, + ).reshape([1, self.nloc, -1]) + nlist1 = self.md.format_nlist( + to_paddle_tensor(self.coord_ext), + to_paddle_tensor(self.atype_ext), + to_paddle_tensor(nlist), + ) + np.testing.assert_equal(self.expected_nlist, to_numpy_array(nlist1)) + + def test_nlist_lt(self): + # n_nnei > nnei + nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1, -1, 4], + [0, -1, 4, -1, -1, 2, -1, 3, -1], + [0, 1, -1, -1, -1, 4, -1, -1, 3], + ], + dtype=np.int64, + ).reshape([1, self.nloc, -1]) + 
nlist1 = self.md.format_nlist( + to_paddle_tensor(self.coord_ext), + to_paddle_tensor(self.atype_ext), + to_paddle_tensor(nlist), + ) + np.testing.assert_equal(self.expected_nlist, to_numpy_array(nlist1)) + + +class TestEnergyModel(unittest.TestCase, TestCaseSingleFrameWithoutNlist): + def setUp(self): + TestCaseSingleFrameWithoutNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc = self.atype.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + ret0 = md0.forward(*args) + ret1 = md1.forward(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_energy"]), + to_numpy_array(ret1["atom_energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["force"]), + to_numpy_array(ret1["force"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret1["virial"]), + atol=self.atol, + ) + ret0 = md0.forward(*args, do_atomic_virial=True) + ret1 = md1.forward(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_virial"]), + to_numpy_array(ret1["atom_virial"]), + atol=self.atol, + ) + coord_ext, atype_ext, mapping, nlist = extend_input_and_build_neighbor_list( + to_paddle_tensor(self.coord), + to_paddle_tensor(self.atype), + self.rcut, + self.sel, + mixed_types=md0.mixed_types(), + box=to_paddle_tensor(self.cell), + ) + args = [coord_ext, atype_ext, nlist] + ret2 = md0.forward_lower(*args, do_atomic_virial=True) + # check the consistency between the reduced 
virial from + # forward and forward_lower + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret2["virial"]), + atol=self.atol, + ) + + +class TestEnergyModelLower(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_lower(*args) + ret1 = md1.forward_lower(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_energy"]), + to_numpy_array(ret1["atom_energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["extended_force"]), + to_numpy_array(ret1["extended_force"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret1["virial"]), + atol=self.atol, + ) + ret0 = md0.forward_lower(*args, do_atomic_virial=True) + ret1 = md1.forward_lower(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["extended_virial"]), + to_numpy_array(ret1["extended_virial"]), + atol=self.atol, + ) + + def test_jit(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md0 = 
paddle.jit.to_static(md0) + self.assertEqual(md0.get_rcut(), self.rcut) + self.assertEqual(md0.get_type_map(), type_map) diff --git a/source/tests/pd/model/test_embedding_net.py b/source/tests/pd/model/test_embedding_net.py new file mode 100644 index 0000000000..12c42049e8 --- /dev/null +++ b/source/tests/pd/model/test_embedding_net.py @@ -0,0 +1,217 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import re +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +from deepmd.pd.utils import ( + env, +) + +tf.disable_eager_execution() + +from pathlib import ( + Path, +) + +from deepmd.pd.model.descriptor import ( + DescrptSeA, +) +from deepmd.pd.utils import ( + dp_random, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.descriptor import DescrptSeA as DescrptSeA_tf + +from ..test_finetune import ( + energy_data_requirement, +) + +CUR_DIR = os.path.dirname(__file__) + + +def gen_key(worb, depth, elemid): + return (worb, depth, elemid) + + +def get_single_batch(dataset, index=None): + if index is None: + index = dp_random.choice(np.arange(len(dataset))) + np_batch = dataset[index] + pt_batch = {} + + for key in [ + "coord", + "box", + "force", + "force_mag", + "energy", + "virial", + "atype", + "natoms", + ]: + if key in np_batch.keys(): + np_batch[key] = np.expand_dims(np_batch[key], axis=0) + pt_batch[key] = paddle.to_tensor(np_batch[key]).to(device=env.DEVICE) + if key in ["coord", "force", "force_mag"]: + np_batch[key] = np_batch[key].reshape(1, -1) + np_batch["natoms"] = np_batch["natoms"][0] + return np_batch, pt_batch + + +def base_se_a(descriptor, coord, atype, natoms, box): + g = tf.Graph() + with g.as_default(): + name_pfx = "d_sea_" + t_coord = 
tf.placeholder( + GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + "t_coord" + ) + t_atype = tf.placeholder(tf.int32, [None, None], name=name_pfx + "t_type") + t_natoms = tf.placeholder( + tf.int32, [descriptor.ntypes + 2], name=name_pfx + "t_natoms" + ) + t_box = tf.placeholder( + GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + "t_box" + ) + t_default_mesh = tf.placeholder(tf.int32, [None], name=name_pfx + "t_mesh") + t_embedding = descriptor.build( + t_coord, t_atype, t_natoms, t_box, t_default_mesh, input_dict={} + ) + fake_energy = tf.reduce_sum(t_embedding) + t_force = descriptor.prod_force_virial(fake_energy, t_natoms)[0] + t_vars = {} + for var in tf.global_variables(): + ms = re.findall(r"([a-z]+)_(\d)_(\d)", var.name) + if len(ms) == 1: + m = ms[0] + key = gen_key(worb=m[0], depth=int(m[1]), elemid=int(m[2])) + t_vars[key] = var + init_op = tf.global_variables_initializer() + + with tf.Session(graph=g) as sess: + sess.run(init_op) + embedding, force, values = sess.run( + [t_embedding, t_force, t_vars], + feed_dict={ + t_coord: coord, + t_atype: atype, + t_natoms: natoms, + t_box: box, + t_default_mesh: np.array([0, 0, 0, 2, 2, 2]), + }, + ) + tf.reset_default_graph() + return embedding, force, values + + +class TestSeA(unittest.TestCase): + def setUp(self): + dp_random.seed(0) + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.bsz = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + 
self.systems = expand_sys_str(self.systems) + ds = DeepmdDataSetForLoader( + self.systems[0], + model_config["type_map"], + ) + ds.add_data_requirement(energy_data_requirement) + self.filter_neuron = model_config["descriptor"]["neuron"] + self.axis_neuron = model_config["descriptor"]["axis_neuron"] + self.np_batch, self.paddle_batch = get_single_batch(ds) + + def test_consistency(self): + dp_d = DescrptSeA_tf( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + seed=1, + ) + dp_embedding, dp_force, dp_vars = base_se_a( + descriptor=dp_d, + coord=self.np_batch["coord"], + atype=self.np_batch["atype"], + natoms=self.np_batch["natoms"], + box=self.np_batch["box"], + ) + + # Reproduced + descriptor = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + ).to(DEVICE) + for name, param in descriptor.named_parameters(): + ms = re.findall(r"(\d)\.layers\.(\d)\.([a-z]+)", name) + if len(ms) == 1: + m = ms[0] + key = gen_key(worb=m[2], depth=int(m[1]) + 1, elemid=int(m[0])) + var = dp_vars[key] + with paddle.no_grad(): + # Keep parameter value consistency between 2 implentations + paddle.assign(var, param) + + pt_coord = self.paddle_batch["coord"].to(env.DEVICE) + pt_coord.stop_gradient = False + + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + pt_coord, + self.paddle_batch["atype"].to(env.DEVICE), + self.rcut, + self.sel, + mixed_types=False, + box=self.paddle_batch["box"].to(env.DEVICE), + ) + descriptor_out, _, _, _, _ = descriptor( + extended_coord, + extended_atype, + nlist, + ) + my_embedding = descriptor_out.cpu().detach().numpy() + fake_energy = paddle.sum(descriptor_out) + fake_energy.backward() + my_force = -pt_coord.grad.cpu().numpy() + + # Check + np.testing.assert_allclose(dp_embedding, my_embedding) + dp_force = dp_force.reshape(*my_force.shape) + 
np.testing.assert_allclose(dp_force, my_force) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_ener_fitting.py b/source/tests/pd/model/test_ener_fitting.py new file mode 100644 index 0000000000..dd13f139dc --- /dev/null +++ b/source/tests/pd/model/test_ener_fitting.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.fitting import InvarFitting as DPInvarFitting +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.model.task.ener import ( + InvarFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestInvarFitting(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE) + rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + atype = paddle.to_tensor(self.atype_ext[:, :nloc], dtype="int64").to( + device=env.DEVICE + ) + + for od, mixed_types, nfp, nap, et, nn in itertools.product( + [1, 3], + [True, False], + [0, 3], + [0, 4], + [[], [0], [1]], + [[4, 4, 4], []], + ): + ft0 = InvarFitting( + "foo", + self.nt, + dd0.dim_out, + od, + numb_fparam=nfp, + numb_aparam=nap, + mixed_types=mixed_types, + exclude_types=et, + neuron=nn, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + ft1 = DPInvarFitting.deserialize(ft0.serialize()) + ft2 = 
InvarFitting.deserialize(ft0.serialize()) + + if nfp > 0: + ifp = paddle.to_tensor( + rng.normal(size=(self.nf, nfp)), dtype=dtype, place=env.DEVICE + ) + else: + ifp = None + if nap > 0: + iap = paddle.to_tensor( + rng.normal(size=(self.nf, self.nloc, nap)), + dtype=dtype, + place=env.DEVICE, + ) + else: + iap = None + + ret0 = ft0(rd0, atype, fparam=ifp, aparam=iap) + ret1 = ft1( + rd0.detach().cpu().numpy(), + atype.detach().cpu().numpy(), + fparam=to_numpy_array(ifp), + aparam=to_numpy_array(iap), + ) + ret2 = ft2(rd0, atype, fparam=ifp, aparam=iap) + np.testing.assert_allclose( + to_numpy_array(ret0["foo"]), + ret1["foo"], + ) + np.testing.assert_allclose( + to_numpy_array(ret0["foo"]), + to_numpy_array(ret2["foo"]), + ) + self.assertEqual(ft0.get_sel_type(), ft1.get_sel_type()) + + def test_jit( + self, + ): + for od, mixed_types, nfp, nap, et in itertools.product( + [1, 3], + [True, False], + [0, 3], + [0, 4], + [[], [0]], + ): + ft0 = InvarFitting( + "foo", + self.nt, + 9, + od, + numb_fparam=nfp, + numb_aparam=nap, + mixed_types=mixed_types, + exclude_types=et, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + paddle.jit.to_static(ft0) + + def test_get_set(self): + ifn0 = InvarFitting( + "energy", + self.nt, + 3, + 1, + seed=GLOBAL_SEED, + ) + rng = np.random.default_rng(GLOBAL_SEED) + foo = rng.normal([3, 4]) + for ii in [ + "bias_atom_e", + "fparam_avg", + "fparam_inv_std", + "aparam_avg", + "aparam_inv_std", + ]: + ifn0[ii] = paddle.to_tensor(foo, dtype=dtype).to(device=env.DEVICE) + np.testing.assert_allclose( + foo, np.reshape(ifn0[ii].detach().cpu().numpy(), foo.shape) + ) diff --git a/source/tests/pd/model/test_env_mat.py b/source/tests/pd/model/test_env_mat.py new file mode 100644 index 0000000000..7cbc698264 --- /dev/null +++ b/source/tests/pd/model/test_env_mat.py @@ -0,0 +1,187 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.utils import ( + EnvMat, +) +from 
deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestCaseSingleFrameWithNlist: + def setUp(self): + # nloc == 3, nall == 4 + self.nloc = 3 + self.nall = 4 + self.nf, self.nt = 2, 2 + self.coord_ext = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, -2, 0], + ], + dtype=np.float64, + ).reshape([1, self.nall, 3]) + self.atype_ext = np.array([0, 0, 1, 0], dtype="int64").reshape([1, self.nall]) + self.mapping = np.array([0, 1, 2, 0], dtype="int64").reshape([1, self.nall]) + # sel = [5, 2] + self.sel = [5, 2] + self.sel_mix = [7] + self.natoms = [3, 3, 2, 1] + self.nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, -1, -1], + ], + dtype="int64", + ).reshape([1, self.nloc, sum(self.sel)]) + self.rcut = 2.2 + self.rcut_smth = 0.4 + # permutations + self.perm = np.array([2, 0, 1, 3], dtype=np.int32) + inv_perm = np.array([1, 2, 0, 3], dtype=np.int32) + # permute the coord and atype + self.coord_ext = np.concatenate( + [self.coord_ext, self.coord_ext[:, self.perm, :]], axis=0 + ).reshape(self.nf, self.nall * 3) + self.atype_ext = np.concatenate( + [self.atype_ext, self.atype_ext[:, self.perm]], axis=0 + ) + self.mapping = np.concatenate( + [self.mapping, self.mapping[:, self.perm]], axis=0 + ) + + # permute the nlist + nlist1 = self.nlist[:, self.perm[: self.nloc], :] + mask = nlist1 == -1 + nlist1 = inv_perm[nlist1] + nlist1 = np.where(mask, -1, nlist1) + self.nlist = np.concatenate([self.nlist, nlist1], axis=0) + self.atol = 1e-12 + + +class TestCaseSingleFrameWithNlistWithVirtual: + def setUp(self): + # nloc == 3, nall == 4 + self.nloc = 4 + self.nall = 5 + self.nf, self.nt = 2, 2 + self.coord_ext = np.array( + [ + [0, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, -2, 0], + ], + dtype=np.float64, + ).reshape([1, self.nall, 3]) + self.atype_ext = np.array([0, 
-1, 0, 1, 0], dtype="int64").reshape( + [1, self.nall] + ) + # sel = [5, 2] + self.sel = [5, 2] + self.sel_mix = [7] + self.natoms = [3, 3, 2, 1] + self.nlist = np.array( + [ + [2, 4, -1, -1, -1, 3, -1], + [-1, -1, -1, -1, -1, -1, -1], + [0, -1, -1, -1, -1, 3, -1], + [0, 2, -1, -1, -1, -1, -1], + ], + dtype="int64", + ).reshape([1, self.nloc, sum(self.sel)]) + self.rcut = 2.2 + self.rcut_smth = 0.4 + # permutations + self.perm = np.array([3, 0, 1, 2, 4], dtype=np.int32) + inv_perm = np.argsort(self.perm) + # permute the coord and atype + self.coord_ext = np.concatenate( + [self.coord_ext, self.coord_ext[:, self.perm, :]], axis=0 + ).reshape(self.nf, self.nall * 3) + self.atype_ext = np.concatenate( + [self.atype_ext, self.atype_ext[:, self.perm]], axis=0 + ) + # permute the nlist + nlist1 = self.nlist[:, self.perm[: self.nloc], :] + mask = nlist1 == -1 + nlist1 = inv_perm[nlist1] + nlist1 = np.where(mask, -1, nlist1) + self.nlist = np.concatenate([self.nlist, nlist1], axis=0) + self.get_real_mapping = np.array([[0, 2, 3], [0, 1, 3]], dtype=np.int32) + self.atol = 1e-12 + + +class TestCaseSingleFrameWithoutNlist: + def setUp(self): + # nloc == 3, nall == 4 + self.nloc = 3 + self.nf, self.nt = 1, 2 + self.coord = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 0, 1], + ], + dtype=np.float64, + ).reshape([1, self.nloc * 3]) + self.atype = np.array([0, 0, 1], dtype="int64").reshape([1, self.nloc]) + self.cell = 2.0 * np.eye(3).reshape([1, 9]) + # sel = [5, 2] + self.sel = [16, 8] + self.sel_mix = [24] + self.natoms = [3, 3, 2, 1] + self.rcut = 2.2 + self.rcut_smth = 0.4 + self.atol = 1e-12 + + +# to be merged with the tf test case +class TestEnvMat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 
0.1 + np.abs(dstd) + em0 = EnvMat(self.rcut, self.rcut_smth) + mm0, diff0, ww0 = em0.call( + self.coord_ext, self.atype_ext, self.nlist, davg, dstd + ) + mm1, diff1, ww1 = prod_env_mat( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext[:, :nloc], dtype="int64").to( + device=env.DEVICE + ), + paddle.to_tensor(davg).to(device=env.DEVICE), + paddle.to_tensor(dstd).to(device=env.DEVICE), + self.rcut, + self.rcut_smth, + ) + np.testing.assert_allclose(mm0, mm1.detach().cpu().numpy()) + np.testing.assert_allclose(diff0, diff1.detach().cpu().numpy()) + np.testing.assert_allclose(ww0, ww1.detach().cpu().numpy()) + np.testing.assert_allclose(mm0[0][self.perm[: self.nloc]], mm0[1]) diff --git a/source/tests/pd/model/test_exclusion_mask.py b/source/tests/pd/model/test_exclusion_mask.py new file mode 100644 index 0000000000..ff479ee7db --- /dev/null +++ b/source/tests/pd/model/test_exclusion_mask.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.exclude_mask import ( + AtomExcludeMask, + PairExcludeMask, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestAtomExcludeMask(unittest.TestCase): + def test_build_type_exclude_mask(self): + nf = 2 + nt = 3 + exclude_types = [0, 2] + atype = np.array( + [ + [0, 2, 1, 2, 0, 1, 0], + [1, 2, 0, 0, 2, 2, 1], + ], + dtype=np.int32, + ).reshape([nf, -1]) + expected_mask = np.array( + [ + [0, 0, 1, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 1], + ] + ).reshape([nf, -1]) + des = AtomExcludeMask(nt, exclude_types=exclude_types) + mask = des(to_paddle_tensor(atype)) + np.testing.assert_equal(to_numpy_array(mask), expected_mask) + + +# to be merged with the tf test 
case +class TestPairExcludeMask(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_build_type_exclude_mask(self): + exclude_types = [[0, 1]] + expected_mask = np.array( + [ + [1, 1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 0, 1], + [0, 0, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 0, 1], + ] + ).reshape(self.nf, self.nloc, sum(self.sel)) + des = PairExcludeMask(self.nt, exclude_types=exclude_types).to(env.DEVICE) + mask = des( + to_paddle_tensor(self.nlist), + to_paddle_tensor(self.atype_ext), + ) + np.testing.assert_equal(to_numpy_array(mask), expected_mask) diff --git a/source/tests/pd/model/test_fitting_net.py b/source/tests/pd/model/test_fitting_net.py new file mode 100644 index 0000000000..9a4d4d128f --- /dev/null +++ b/source/tests/pd/model/test_fitting_net.py @@ -0,0 +1,148 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import re +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +from deepmd.pd.model.task import ( + EnergyFittingNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.tf.fit.ener import ( + EnerFitting, +) + +from ...seed import ( + GLOBAL_SEED, +) + + +class FakeDescriptor: + def __init__(self, ntypes, embedding_width): + self._ntypes = ntypes + self._dim_out = embedding_width + + def get_ntypes(self): + return self._ntypes + + def get_dim_out(self): + return self._dim_out + + +def gen_key(type_id, layer_id, w_or_b): + return (type_id, layer_id, w_or_b) + + +def base_fitting_net(dp_fn, embedding, natoms, atype): + g = tf.Graph() + with g.as_default(): + t_embedding = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + t_natoms = tf.placeholder(tf.int32, [None]) + t_atype = tf.placeholder(tf.int32, [None, None]) + t_energy = dp_fn.build(t_embedding, t_natoms, {"atype": t_atype}) + 
init_op = tf.global_variables_initializer() + t_vars = {} + for var in tf.global_variables(): + key = None + matched = re.match(r"layer_(\d)_type_(\d)/([a-z]+)", var.name) + if matched: + key = gen_key( + type_id=matched.group(2), + layer_id=matched.group(1), + w_or_b=matched.group(3), + ) + else: + matched = re.match(r"final_layer_type_(\d)/([a-z]+)", var.name) + if matched: + key = gen_key( + type_id=matched.group(1), layer_id=-1, w_or_b=matched.group(2) + ) + if key is not None: + t_vars[key] = var + + with tf.Session(graph=g) as sess: + sess.run(init_op) + energy, values = sess.run( + [t_energy, t_vars], + feed_dict={ + t_embedding: embedding, + t_natoms: natoms, + t_atype: atype, + }, + ) + tf.reset_default_graph() + return energy, values + + +class TestFittingNet(unittest.TestCase): + def setUp(self): + nloc = 7 + self.embedding_width = 30 + self.natoms = np.array([nloc, nloc, 2, 5], dtype=np.int32) + rng = np.random.default_rng(GLOBAL_SEED) + self.embedding = rng.uniform(size=[4, nloc * self.embedding_width]) + self.ntypes = self.natoms.size - 2 + self.n_neuron = [32, 32, 32] + self.atype = np.zeros([4, nloc], dtype=np.int32) + cnt = 0 + for i in range(self.ntypes): + self.atype[:, cnt : cnt + self.natoms[i + 2]] = i + cnt += self.natoms[i + 2] + + fake_d = FakeDescriptor(2, 30) + self.dp_fn = EnerFitting( + fake_d.get_ntypes(), fake_d.get_dim_out(), self.n_neuron + ) + self.dp_fn.bias_atom_e = rng.uniform(size=[self.ntypes]) + + def test_consistency(self): + dp_energy, values = base_fitting_net( + self.dp_fn, self.embedding, self.natoms, self.atype + ) + my_fn = EnergyFittingNet( + self.ntypes, + self.embedding_width, + neuron=self.n_neuron, + bias_atom_e=self.dp_fn.bias_atom_e, + mixed_types=False, + ).to(env.DEVICE) + for name, param in my_fn.named_parameters(): + matched = re.match( + r"filter_layers\.networks\.(\d).layers\.(\d)\.([a-z]+)", name + ) + key = None + if matched: + if int(matched.group(2)) == len(self.n_neuron): + layer_id = -1 + else: + 
layer_id = matched.group(2) + key = gen_key( + type_id=matched.group(1), + layer_id=layer_id, + w_or_b=matched.group(3), + ) + assert key is not None + var = values[key] + with paddle.no_grad(): + # Keep parameter value consistency between 2 implentations + paddle.assign(var, param) + embedding = paddle.to_tensor(self.embedding) + embedding = embedding.reshape([4, -1, self.embedding_width]) + atype = paddle.to_tensor(self.atype) + ret = my_fn(embedding.to(env.DEVICE), atype.to(env.DEVICE)) + my_energy = ret["energy"] + my_energy = my_energy.detach().cpu() + np.testing.assert_allclose(dp_energy, my_energy.numpy().reshape([-1])) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_forward_lower.py b/source/tests/pd/model/test_forward_lower.py new file mode 100644 index 0000000000..ac8d0f54fc --- /dev/null +++ b/source/tests/pd/model/test_forward_lower.py @@ -0,0 +1,208 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dpa1, + model_dpa2, + model_se_e2_a, + model_spin, + model_zbl, +) + +dtype = paddle.float64 + + +def reduce_tensor(extended_tensor, mapping, nloc: int): + nframes, nall = extended_tensor.shape[:2] + ext_dims = extended_tensor.shape[2:] + reduced_tensor = paddle.zeros( + [nframes, nloc, *ext_dims], + dtype=extended_tensor.dtype, + ).to(device=extended_tensor.place) + mldims = list(mapping.shape) + mapping = mapping.reshape(mldims + [1] * len(ext_dims)).expand( + [-1] * len(mldims) + list(ext_dims) + ) + # nf x nloc x (*ext_dims) + reduced_tensor = decomp.scatter_reduce( + reduced_tensor, + 1, + index=mapping, + src=extended_tensor, + 
reduce="sum", + ) + return reduced_tensor + + +class ForwardLowerTest: + def test( + self, + ): + prec = self.prec + natoms = 5 + cell = 4.0 * paddle.eye(3, dtype=dtype).to(device=env.DEVICE) + generator = paddle.seed(GLOBAL_SEED) + coord = 3.0 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + spin = 0.5 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int64).to( + device=env.DEVICE + ) + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag"] + + result_forward = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord.unsqueeze(0), + atype.unsqueeze(0), + self.model.get_rcut() + 1.0 + if test_spin + else self.model.get_rcut(), # buffer region for spin nlist + self.model.get_sel(), + mixed_types=self.model.mixed_types(), + box=cell.unsqueeze(0), + ) + extended_spin = decomp.take_along_axis( + spin.unsqueeze(0), indices=mapping.unsqueeze(-1).tile((1, 1, 3)), axis=1 + ) + input_dict = { + "extended_coord": extended_coord, + "extended_atype": extended_atype, + "nlist": nlist, + "mapping": mapping, + "do_atomic_virial": False, + } + if test_spin: + input_dict["extended_spin"] = extended_spin + result_forward_lower = self.model.forward_lower(**input_dict) + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + result_forward_lower[key].numpy(), + result_forward[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key in ["force", "force_mag"]: + reduced_vv = reduce_tensor( + result_forward_lower[f"extended_{key}"], mapping, natoms + ) + np.testing.assert_allclose( + reduced_vv.numpy(), + result_forward[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key == "virial": + if not hasattr(self, 
"test_virial") or self.test_virial: + np.testing.assert_allclose( + result_forward_lower[key].numpy(), + result_forward[key].numpy(), + rtol=prec, + atol=prec, + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + +class TestEnergyModelSeA(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_se_e2_a) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_dpa1) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_dpa2) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_zbl) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_spin) + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinDPA1(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_spin) + model_params["descriptor"] = copy.deepcopy(model_dpa1)["descriptor"] + # double sel for virtual atoms to avoid large error + model_params["descriptor"]["sel"] *= 2 + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class 
TestEnergyModelSpinDPA2(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_spin) + model_params["descriptor"] = copy.deepcopy(model_dpa2)["descriptor"] + # double sel for virtual atoms to avoid large error + model_params["descriptor"]["repinit"]["nsel"] *= 2 + model_params["descriptor"]["repformer"]["nsel"] *= 2 + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_get_model.py b/source/tests/pd/model/test_get_model.py new file mode 100644 index 0000000000..7ace7c4e43 --- /dev/null +++ b/source/tests/pd/model/test_get_model.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +dtype = paddle.float64 + +model_se_e2_a = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, + "atom_exclude_types": [1], + "pair_exclude_types": [[1, 2]], + "preset_out_bias": { + "energy": [ + None, + [1.0], + [3.0], + ] + }, +} + + +class TestGetModel(unittest.TestCase): + def test_model_attr(self): + model_params = copy.deepcopy(model_se_e2_a) + self.model = get_model(model_params).to(env.DEVICE) + atomic_model = self.model.atomic_model + self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) + self.assertEqual( + atomic_model.preset_out_bias, + { + "energy": [ + None, + np.array([1.0]), + np.array([3.0]), + ] + }, + ) + self.assertEqual(atomic_model.atom_exclude_types, [1]) + self.assertEqual(atomic_model.pair_exclude_types, [[1, 2]]) + + def 
test_model_attr_energy_float(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": ["1.", 3, None]} + self.model = get_model(model_params).to(env.DEVICE) + atomic_model = self.model.atomic_model + self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) + self.assertEqual( + atomic_model.preset_out_bias, + { + "energy": [ + np.array([1.0]), + np.array([3.0]), + None, + ] + }, + ) + self.assertEqual(atomic_model.atom_exclude_types, [1]) + self.assertEqual(atomic_model.pair_exclude_types, [[1, 2]]) + + def test_model_attr_energy_unsupported_type(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": [1.0 + 2.0j, 3, None]} + with self.assertRaises(ValueError): + self.model = get_model(model_params).to(env.DEVICE) + + def test_model_attr_energy_unsupported_value(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": ["1.0 + 2.0j", 3, None]} + with self.assertRaises(ValueError): + self.model = get_model(model_params).to(env.DEVICE) + + def test_notset_model_attr(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params.pop("atom_exclude_types") + model_params.pop("pair_exclude_types") + model_params.pop("preset_out_bias") + self.model = get_model(model_params).to(env.DEVICE) + atomic_model = self.model.atomic_model + self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) + self.assertEqual(atomic_model.preset_out_bias, None) + self.assertEqual(atomic_model.atom_exclude_types, []) + self.assertEqual(atomic_model.pair_exclude_types, []) + + def test_preset_wrong_len(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": [None]} + with self.assertRaises(ValueError): + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_mlp.py b/source/tests/pd/model/test_mlp.py new file mode 100644 index 0000000000..90653644d3 --- /dev/null +++ 
b/source/tests/pd/model/test_mlp.py @@ -0,0 +1,283 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.utils import EmbeddingNet as DPEmbeddingNet +from deepmd.dpmodel.utils import FittingNet as DPFittingNet +from deepmd.dpmodel.utils import ( + NativeLayer, + NativeNet, +) +from deepmd.pd.model.network.mlp import ( + MLP, + EmbeddingNet, + FittingNet, + MLPLayer, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + + +def get_tols(prec): + if prec in ["single", "float32"]: + rtol, atol = 0.0, 1e-4 + elif prec in ["double", "float64"]: + rtol, atol = 0.0, 1e-12 + # elif prec in ["half", "float16"]: + # rtol, atol=1e-2, 0 + else: + raise ValueError(f"unknown prec {prec}") + return rtol, atol + + +class TestMLPLayer(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [(5, 5), (5, 10), (5, 8), (8, 5)], # inp, out + [True, False], # bias + [True, False], # use time step + ["tanh", "none"], # activation + [True, False], # resnet + [None, [4], [3, 2]], # prefix shapes + ["float32", "double"], # precision + ) + + def test_match_native_layer( + self, + ): + for (ninp, nout), bias, ut, ac, resnet, ashp, prec in self.test_cases: + # input + inp_shap = [ninp] + if ashp is not None: + inp_shap = ashp + inp_shap + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = ( + paddle.arange(np.prod(inp_shap), dtype=dtype) + .to(device=env.DEVICE) + .reshape(inp_shap) + ) + # def mlp layer + ml = MLPLayer(ninp, nout, bias, ut, ac, resnet, precision=prec).to( + env.DEVICE + ) + # check consistency + nl = NativeLayer.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"(i={ninp}, o={nout}) bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + # check self-consistency + ml1 = 
MLPLayer.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"(i={ninp}, o={nout}) bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + + def test_jit(self): + for (ninp, nout), bias, ut, ac, resnet, _, prec in self.test_cases: + ml = MLPLayer(ninp, nout, bias, ut, ac, resnet, precision=prec) + model = paddle.jit.to_static(ml) + ml1 = MLPLayer.deserialize(ml.serialize()) + model = paddle.jit.to_static(ml1) + + +class TestMLP(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [[2, 2, 4, 8], [1, 3, 3]], # inp and hiddens + [True, False], # bias + [True, False], # use time step + ["tanh", "none"], # activation + [True, False], # resnet + [None, [4], [3, 2]], # prefix shapes + ["float32", "double"], # precision + ) + + def test_match_native_net( + self, + ): + for ndims, bias, ut, ac, resnet, ashp, prec in self.test_cases: + # input + inp_shap = [ndims[0]] + if ashp is not None: + inp_shap = ashp + inp_shap + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = ( + paddle.arange(np.prod(inp_shap), dtype=dtype) + .to(device=env.DEVICE) + .reshape(inp_shap) + ) + # def MLP + layers = [] + for ii in range(1, len(ndims)): + layers.append( + MLPLayer( + ndims[ii - 1], ndims[ii], bias, ut, ac, resnet, precision=prec + ).serialize() + ) + ml = MLP(layers).to(env.DEVICE) + # check consistency + nl = NativeNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"net={ndims} bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + # check self-consistency + ml1 = MLP.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + 
err_msg=f"net={ndims} bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + + def test_jit(self): + for ndims, bias, ut, ac, resnet, _, prec in self.test_cases: + layers = [] + for ii in range(1, len(ndims)): + ml = layers.append( + MLPLayer( + ndims[ii - 1], ndims[ii], bias, ut, ac, resnet, precision=prec + ).serialize() + ) + ml = MLP(ml) + model = paddle.jit.to_static(ml) + ml1 = MLP.deserialize(ml.serialize()) + model = paddle.jit.to_static(ml1) + + +class TestEmbeddingNet(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [1, 3], # inp + [[24, 48, 96], [24, 36]], # and hiddens + ["tanh", "none"], # activation + [True, False], # resnet_dt + ["float32", "double"], # precision + ) + + def test_match_embedding_net( + self, + ): + for idim, nn, act, idt, prec in self.test_cases: + # input + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = paddle.arange(idim, dtype=dtype).to(device=env.DEVICE) + # def MLP + ml = EmbeddingNet(idim, nn, act, idt, prec).to(env.DEVICE) + # check consistency + nl = DPEmbeddingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + # check self-consistency + ml1 = EmbeddingNet.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + + def test_jit( + self, + ): + for idim, nn, act, idt, prec in self.test_cases: + # def MLP + ml = EmbeddingNet(idim, nn, act, idt, prec).to(env.DEVICE) + ml1 = EmbeddingNet.deserialize(ml.serialize()).to(env.DEVICE) + model = paddle.jit.to_static(ml) + model = paddle.jit.to_static(ml1) + + +class TestFittingNet(unittest.TestCase): + def setUp(self): + self.test_cases = 
itertools.product( + [1, 3], # inp + [1, 5], # out + [[24, 48, 96], [24, 36]], # and hiddens + ["tanh", "none"], # activation + [True, False], # resnet_dt + ["float32", "double"], # precision + [True, False], # bias_out + ) + + def test_match_fitting_net( + self, + ): + for idim, odim, nn, act, idt, prec, ob in self.test_cases: + # input + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = paddle.arange(idim, dtype=dtype).to(device=env.DEVICE) + # def MLP + ml = FittingNet( + idim, + odim, + neuron=nn, + activation_function=act, + resnet_dt=idt, + precision=prec, + bias_out=ob, + ).to(env.DEVICE) + # check consistency + nl = DPFittingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + # check self-consistency + ml1 = FittingNet.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + + def test_jit( + self, + ): + for idim, odim, nn, act, idt, prec, ob in self.test_cases: + # def MLP + ml = FittingNet( + idim, + odim, + neuron=nn, + activation_function=act, + resnet_dt=idt, + precision=prec, + bias_out=ob, + ).to(env.DEVICE) + ml1 = FittingNet.deserialize(ml.serialize()).to(env.DEVICE) + model = paddle.jit.to_static(ml) + model = paddle.jit.to_static(ml1) diff --git a/source/tests/pd/model/test_model.py b/source/tests/pd/model/test_model.py new file mode 100644 index 0000000000..1bdc1aa74d --- /dev/null +++ b/source/tests/pd/model/test_model.py @@ -0,0 +1,424 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import collections +import json +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +from deepmd.pd.utils import ( + env, 
+) + +tf.disable_eager_execution() + +from pathlib import ( + Path, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.env import ( + DEVICE, +) +from deepmd.pd.utils.learning_rate import LearningRateExp as MyLRExp +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.descriptor import DescrptSeA as DescrptSeA_tf +from deepmd.tf.fit import ( + EnerFitting, +) +from deepmd.tf.loss import ( + EnerStdLoss, +) +from deepmd.tf.model import ( + EnerModel, +) +from deepmd.tf.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.tf.utils.learning_rate import ( + LearningRateExp, +) + +from ..test_finetune import ( + energy_data_requirement, +) + +VariableState = collections.namedtuple("VariableState", ["value", "gradient"]) + + +def paddle2tf(paddle_name, last_layer_id=None): + fields = paddle_name.split(".") + offset = int(fields[3] == "networks") + 1 + element_id = int(fields[2 + offset]) + if fields[1] == "descriptor": + layer_id = int(fields[4 + offset]) + 1 + weight_type = fields[5 + offset] + ret = "filter_type_all/%s_%d_%d:0" % (weight_type, layer_id, element_id) + elif fields[1] == "fitting_net": + layer_id = int(fields[4 + offset]) + weight_type = fields[5 + offset] + if layer_id != last_layer_id: + ret = "layer_%d_type_%d/%s:0" % (layer_id, element_id, weight_type) + else: + ret = "final_layer_type_%d/%s:0" % (element_id, weight_type) + else: + raise RuntimeError(f"Unexpected parameter name: {paddle_name}") + return ret + + +class DpTrainer: + def __init__(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] 
+ self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + self.batch_size = config["training"]["training_data"]["batch_size"] + self.type_map = model_config["type_map"] + self.filter_neuron = model_config["descriptor"]["neuron"] + self.axis_neuron = model_config["descriptor"]["axis_neuron"] + self.n_neuron = model_config["fitting_net"]["neuron"] + self.data_stat_nbatch = 3 + self.start_lr = 0.001 + self.stop_lr = 3.51e-8 + self.decay_steps = 500 + self.stop_steps = 1600 + self.start_pref_e = 1.0 + self.limit_pref_e = 2.0 + self.start_pref_f = 2.0 + self.limit_pref_f = 1.0 + self.ntypes = len(self.type_map) + + def get_intermediate_state(self, num_steps=1): + dp_model = self._get_dp_model() + dp_loss = self._get_dp_loss() + dp_lr = self._get_dp_lr() + dp_ds = self._get_dp_dataset() + dp_ds.add_data_requirements(dp_model.input_requirement) + dp_ds.add_data_requirements(dp_loss.label_requirement) + dp_model.data_stat(dp_ds) + + # Build graph + g = tf.Graph() + with g.as_default(): + place_holders = self._get_dp_placeholders(dp_ds) + model_pred = dp_model.build( + coord_=place_holders["coord"], + atype_=place_holders["type"], + natoms=place_holders["natoms_vec"], + box=place_holders["box"], + mesh=place_holders["default_mesh"], + input_dict=place_holders, + ) + global_step = tf.train.get_or_create_global_step() + learning_rate = dp_lr.build(global_step, self.stop_steps) + l2_l, _ = dp_loss.build( + learning_rate=learning_rate, + natoms=place_holders["natoms_vec"], + model_dict=model_pred, + label_dict=place_holders, + suffix="test", + ) + t_vars = tf.trainable_variables() + optimizer = tf.train.AdamOptimizer(learning_rate) + t_grad_and_vars = optimizer.compute_gradients(l2_l, t_vars) + train_op = 
optimizer.apply_gradients(t_grad_and_vars, global_step) + init_op = tf.global_variables_initializer() + t_heads = { + "loss": l2_l, + "energy": model_pred["energy"], + "force": model_pred["force"], + "virial": model_pred["virial"], + "atom_virial": model_pred["atom_virial"], + } + + # Get statistics of each component + stat_dict = { + "descriptor.mean": dp_model.descrpt.davg, + "descriptor.stddev": dp_model.descrpt.dstd, + "fitting_net.bias_atom_e": dp_model.fitting.bias_atom_e, + } + + # Get variables and their gradients + with tf.Session(graph=g) as sess: + sess.run(init_op) + for _ in range(num_steps): + batch = dp_ds.get_batch() + feeds = self._get_feed_dict(batch, place_holders) + sess.run(train_op, feed_dict=feeds) + + batch = dp_ds.get_batch() + feeds = self._get_feed_dict(batch, place_holders) + grads_and_vars, head_dict = sess.run( + [t_grad_and_vars, t_heads], feed_dict=feeds + ) + vs_dict = {} + for idx, one in enumerate(t_vars): + grad, var = grads_and_vars[idx] + vs_dict[one.name] = VariableState(var, grad) + + tf.reset_default_graph() + # Used for reproducing + return batch, head_dict, stat_dict, vs_dict + + def _get_dp_dataset(self): + data = DeepmdDataSystem( + systems=self.systems, + batch_size=self.batch_size, + test_size=1, + rcut=self.rcut, + type_map=self.type_map, + trn_all_set=True, + ) + return data + + def _get_dp_model(self): + dp_descrpt = DescrptSeA_tf( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + ) + dp_fitting = EnerFitting( + dp_descrpt.get_ntypes(), dp_descrpt.get_dim_out(), neuron=self.n_neuron + ) + return EnerModel( + dp_descrpt, + dp_fitting, + type_map=self.type_map, + data_stat_nbatch=self.data_stat_nbatch, + ) + + def _get_dp_loss(self): + return EnerStdLoss( + starter_learning_rate=self.start_lr, + start_pref_e=self.start_pref_e, + limit_pref_e=self.limit_pref_e, + start_pref_f=self.start_pref_f, + limit_pref_f=self.limit_pref_f, + ) + + def 
_get_dp_lr(self): + return LearningRateExp( + start_lr=self.start_lr, stop_lr=self.stop_lr, decay_steps=self.decay_steps + ) + + def _get_dp_placeholders(self, dataset): + place_holders = {} + data_dict = dataset.get_data_dict() + for kk in data_dict.keys(): + if kk == "type": + continue + prec = tf.float64 + place_holders[kk] = tf.placeholder(prec, [None], name="t_" + kk) + place_holders["find_" + kk] = tf.placeholder( + tf.float32, name="t_find_" + kk + ) + place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type") + place_holders["natoms_vec"] = tf.placeholder( + tf.int32, [self.ntypes + 2], name="t_natoms" + ) + place_holders["default_mesh"] = tf.placeholder(tf.int32, [None], name="t_mesh") + place_holders["is_training"] = tf.placeholder(tf.bool) + return place_holders + + def _get_feed_dict(self, batch, place_holders): + feed_dict = {} + for kk in batch.keys(): + if kk == "find_type" or kk == "type": + continue + if "find_" in kk: + feed_dict[place_holders[kk]] = batch[kk] + else: + feed_dict[place_holders[kk]] = np.reshape(batch[kk], [-1]) + for ii in ["type"]: + feed_dict[place_holders[ii]] = np.reshape(batch[ii], [-1]) + for ii in ["natoms_vec", "default_mesh"]: + feed_dict[place_holders[ii]] = batch[ii] + feed_dict[place_holders["is_training"]] = True + return feed_dict + + +class TestEnergy(unittest.TestCase): + def setUp(self): + self.dp_trainer = DpTrainer() + self.wanted_step = 0 + for key in dir(self.dp_trainer): + if not key.startswith("_") or key == "get_intermediate_state": + value = getattr(self.dp_trainer, key) + setattr(self, key, value) + + def test_consistency(self): + batch, head_dict, stat_dict, vs_dict = self.dp_trainer.get_intermediate_state( + self.wanted_step + ) + # Build DeePMD graph + my_ds = DpLoaderSet(self.systems, self.batch_size, self.type_map) + my_ds.add_data_requirement(energy_data_requirement) + my_model = get_model( + model_params={ + "descriptor": { + "type": "se_e2_a", + "sel": self.sel, + "rcut_smth": 
self.rcut_smth, + "rcut": self.rcut, + "neuron": self.filter_neuron, + "axis_neuron": self.axis_neuron, + }, + "fitting_net": {"neuron": self.n_neuron, "mixed_types": False}, + "data_stat_nbatch": self.data_stat_nbatch, + "type_map": self.type_map, + }, + ) + my_model.to(DEVICE) + my_lr = MyLRExp(self.start_lr, self.stop_lr, self.decay_steps, self.stop_steps) + my_loss = EnergyStdLoss( + starter_learning_rate=self.start_lr, + start_pref_e=self.start_pref_e, + limit_pref_e=self.limit_pref_e, + start_pref_f=self.start_pref_f, + limit_pref_f=self.limit_pref_f, + ) + + # Keep statistics consistency between 2 implentations + my_em = my_model.get_descriptor() + mean = stat_dict["descriptor.mean"].reshape([self.ntypes, my_em.get_nsel(), 4]) + stddev = stat_dict["descriptor.stddev"].reshape( + [self.ntypes, my_em.get_nsel(), 4] + ) + my_em.set_stat_mean_and_stddev( + paddle.to_tensor(mean).to(device=DEVICE), + paddle.to_tensor(stddev).to(device=DEVICE), + ) + my_model.get_fitting_net().bias_atom_e = paddle.to_tensor( + stat_dict["fitting_net.bias_atom_e"], place=DEVICE + ) + + # Keep parameter value consistency between 2 implentations + for name, param in my_model.named_parameters(): + name = name.replace("sea.", "") + var_name = paddle2tf(name, last_layer_id=len(self.n_neuron)) + var = vs_dict[var_name].value + with paddle.no_grad(): + src = paddle.to_tensor(var) + dst = param + # print(name) + # print(src.mean(), src.std()) + # print(dst.mean(), dst.std()) + paddle.assign(src, dst) + # Start forward computing + tmp = np.copy(batch["natoms_vec"]) + batch = my_ds.systems[0]._data_system._get_subdata(batch, 0) + batch = my_ds.systems[0]._data_system.reformat_data_torch(batch) + for key in ["coord", "atype", "box", "energy", "force"]: + batch[key] = paddle.to_tensor(batch[key]).to(device=env.DEVICE) + batch[key] = batch[key].unsqueeze(0) + batch["coord"].stop_gradient = False + batch["natoms_vec"] = tmp + batch["natoms"] = paddle.to_tensor( + batch["natoms_vec"], 
place=batch["coord"].place + ).unsqueeze(0) + model_input = { + "coord": batch["coord"].to(env.DEVICE), + "atype": batch["atype"].to(env.DEVICE), + "box": batch["box"].to(env.DEVICE), + "do_atomic_virial": True, + } + model_input_1 = { + "coord": batch["coord"].to(env.DEVICE), + "atype": batch["atype"].to(env.DEVICE), + "box": batch["box"].to(env.DEVICE), + "do_atomic_virial": False, + } + label = { + "energy": batch["energy"].to(env.DEVICE), + "find_energy": 1.0, + "force": batch["force"].to(env.DEVICE), + "find_force": 1.0, + } + cur_lr = my_lr.value(self.wanted_step) + model_predict, loss, _ = my_loss( + model_input, my_model, label, int(batch["natoms"][0, 0]), cur_lr + ) + model_predict_1 = my_model(**model_input_1) + p_energy, p_force, p_virial, p_atomic_virial = ( + model_predict["energy"], + model_predict["force"], + model_predict["virial"], + model_predict["atom_virial"], + ) + np.testing.assert_allclose( + head_dict["energy"], p_energy.reshape([-1]).cpu().detach().numpy() + ) + np.testing.assert_allclose( + head_dict["force"], + p_force.reshape(head_dict["force"].shape).cpu().detach().numpy(), + ) + rtol = 1e-5 + atol = 1e-8 + np.testing.assert_allclose( + head_dict["loss"], loss.cpu().detach().numpy(), rtol=rtol, atol=atol + ) + np.testing.assert_allclose( + head_dict["virial"], + p_virial.reshape(head_dict["virial"].shape).cpu().detach().numpy(), + ) + np.testing.assert_allclose( + head_dict["virial"], + model_predict_1["virial"] + .reshape([*head_dict["virial"].shape]) + .cpu() + .detach() + .numpy(), + ) + self.assertIsNone(model_predict_1.get("atom_virial", None)) + np.testing.assert_allclose( + head_dict["atom_virial"], + p_atomic_virial.reshape(head_dict["atom_virial"].shape) + .cpu() + .detach() + .numpy(), + ) + optimizer = paddle.optimizer.Adam( + learning_rate=cur_lr, parameters=my_model.parameters() + ) + optimizer.clear_grad() + + def step(step_id): + bdata = self.training_data.get_trainning_batch() + optimizer.clear_grad() + + # Compare 
gradient for consistency + loss.backward() + + for name, param in my_model.named_parameters(): + name = name.replace("sea.", "") + var_name = paddle2tf(name, last_layer_id=len(self.n_neuron)) + var_grad = vs_dict[var_name].gradient + param_grad = param.grad.cpu() + var_grad = paddle.to_tensor(var_grad).to(device="cpu") + assert np.allclose(var_grad, param_grad, rtol=rtol, atol=atol) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_nlist.py b/source/tests/pd/model/test_nlist.py new file mode 100644 index 0000000000..0947355ac0 --- /dev/null +++ b/source/tests/pd/model/test_nlist.py @@ -0,0 +1,304 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + build_directional_neighbor_list, + build_multiple_neighbor_list, + build_neighbor_list, + extend_coord_with_ghosts, + get_multiple_nlist_key, +) +from deepmd.pd.utils.region import ( + inter2phys, +) + +dtype = paddle.float64 + + +class TestNeighList(unittest.TestCase): + def setUp(self): + self.nf = 3 + self.nloc = 3 + self.ns = 5 * 5 * 3 + self.nall = self.ns * self.nloc + self.cell = paddle.to_tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype, place=env.DEVICE + ) + self.icoord = paddle.to_tensor( + [[0, 0, 0], [0, 0, 0], [0.5, 0.5, 0.1]], dtype=dtype, place=env.DEVICE + ) + self.atype = paddle.to_tensor([-1, 0, 1], dtype=paddle.int64).to( + device=env.DEVICE + ) + [self.cell, self.icoord, self.atype] = [ + ii.unsqueeze(0) for ii in [self.cell, self.icoord, self.atype] + ] + self.coord = inter2phys(self.icoord, self.cell).reshape([-1, self.nloc * 3]) + self.cell = self.cell.reshape([-1, 9]) + [self.cell, self.coord, self.atype] = [ + paddle.tile(ii, [self.nf, 1]) for ii in [self.cell, self.coord, self.atype] + ] + self.rcut = 1.01 + self.prec = 1e-10 + self.nsel = [10, 10] + # genrated by preprocess.build_neighbor_list + # 
ref_nlist, _, _ = legacy_build_neighbor_list( + # 2, ecoord[0], eatype[0], + # self.rcut, + # paddle.to_tensor([10,20], dtype=paddle.int64), + # mapping[0], type_split=True, ) + self.ref_nlist = paddle.to_tensor( + [ + [-1] * sum(self.nsel), + [1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1], + [1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 2, 2, 2, 2, 2, 2, -1, -1, -1, -1], + ], + place=env.DEVICE, + ) + + def test_build_notype(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + # test normal sel + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel), + distinguish_types=False, + ) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + np.testing.assert_allclose( + paddle.sort(nlist_loc, axis=-1).numpy(), + paddle.sort(self.ref_nlist, axis=-1).numpy(), + ) + # test a very large sel + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel) + 300, # +300, real nnei==224 + distinguish_types=False, + ) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + np.testing.assert_allclose( + paddle.sort(nlist_loc, descending=True, axis=-1)[ + :, : sum(self.nsel) + ].numpy(), + paddle.sort(self.ref_nlist, descending=True, axis=-1).numpy(), + ) + + def test_build_type(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + self.nsel, + distinguish_types=True, + ) + np.testing.assert_allclose(nlist[0].numpy(), nlist[1].numpy()) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + for ii in range(2): + np.testing.assert_allclose( + paddle.sort( + paddle.split(nlist_loc, (self.nsel), axis=-1)[ii], axis=-1 + ).numpy(), + paddle.sort( + paddle.split(self.ref_nlist, (self.nsel), 
axis=-1)[ii], axis=-1 + ).numpy(), + ) + + def test_build_multiple_nlist(self): + rcuts = [1.01, 2.01] + nsels = [20, 80] + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, max(rcuts) + ) + nlist1 = build_neighbor_list( + ecoord, + eatype, + self.nloc, + rcuts[1], + nsels[1] - 1, + distinguish_types=False, + ) + pad = -1 * paddle.ones([self.nf, self.nloc, 1], dtype=nlist1.dtype).to( + device=nlist1.place + ) + nlist2 = paddle.concat([nlist1, pad], axis=-1) + nlist0 = build_neighbor_list( + ecoord, + eatype, + self.nloc, + rcuts[0], + nsels[0], + distinguish_types=False, + ) + nlists = build_multiple_neighbor_list(ecoord, nlist1, rcuts, nsels) + for dd in range(2): + self.assertEqual( + nlists[get_multiple_nlist_key(rcuts[dd], nsels[dd])].shape[-1], + nsels[dd], + ) + np.testing.assert_allclose( + nlists[get_multiple_nlist_key(rcuts[0], nsels[0])].numpy(), + nlist0.numpy(), + ) + np.testing.assert_allclose( + nlists[get_multiple_nlist_key(rcuts[1], nsels[1])].numpy(), + nlist2.numpy(), + ) + + def test_extend_coord(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + # expected ncopy x nloc + self.assertEqual(list(ecoord.shape), [self.nf, self.nall * 3]) + self.assertEqual(list(eatype.shape), [self.nf, self.nall]) + self.assertEqual(list(mapping.shape), [self.nf, self.nall]) + # check the nloc part is identical with original coord + np.testing.assert_allclose( + ecoord[:, : self.nloc * 3].numpy(), + self.coord.numpy(), + rtol=self.prec, + atol=self.prec, + ) + # check the shift vectors are aligned with grid + shift_vec = ( + ecoord.reshape([-1, self.ns, self.nloc, 3]) + - self.coord.reshape([-1, self.nloc, 3])[:, None, :, :] + ) + shift_vec = shift_vec.reshape([-1, self.nall, 3]) + # hack!!! 
assumes identical cell across frames + shift_vec = paddle.matmul( + shift_vec, paddle.linalg.inv(self.cell.reshape([self.nf, 3, 3])[0]) + ) + # nf x nall x 3 + shift_vec = paddle.round(shift_vec) + # check: identical shift vecs + np.testing.assert_allclose( + shift_vec[0].numpy(), shift_vec[1].numpy(), rtol=self.prec, atol=self.prec + ) + # check: shift idx aligned with grid + mm, cc = paddle.unique(shift_vec[0][:, 0], axis=-1, return_counts=True) + np.testing.assert_allclose( + mm.numpy(), + paddle.to_tensor([-2, -1, 0, 1, 2], dtype=dtype) + .to(device=env.DEVICE) + .numpy(), + rtol=self.prec, + atol=self.prec, + ) + np.testing.assert_allclose( + cc.numpy(), + paddle.to_tensor( + [self.ns * self.nloc // 5] * 5, dtype=paddle.int64, place=env.DEVICE + ).numpy(), + rtol=self.prec, + atol=self.prec, + ) + mm, cc = paddle.unique(shift_vec[1][:, 1], axis=-1, return_counts=True) + np.testing.assert_allclose( + mm.numpy(), + paddle.to_tensor([-2, -1, 0, 1, 2], dtype=dtype).to(device=env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + np.testing.assert_allclose( + cc.numpy(), + paddle.to_tensor( + [self.ns * self.nloc // 5] * 5, dtype=paddle.int64, place=env.DEVICE + ), + rtol=self.prec, + atol=self.prec, + ) + mm, cc = paddle.unique(shift_vec[1][:, 2], axis=-1, return_counts=True) + np.testing.assert_allclose( + mm.numpy(), + paddle.to_tensor([-1, 0, 1], dtype=dtype).to(device=env.DEVICE).numpy(), + rtol=self.prec, + atol=self.prec, + ) + np.testing.assert_allclose( + cc.numpy(), + paddle.to_tensor( + [self.ns * self.nloc // 3] * 3, dtype=paddle.int64, place=env.DEVICE + ).numpy(), + rtol=self.prec, + atol=self.prec, + ) + + def test_build_directional_nlist(self): + """Directional nlist is tested against the standard nlist implementation.""" + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + for distinguish_types, mysel in zip([True, False], [sum(self.nsel), 300]): + # full neighbor list + nlist_full = 
build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel), + distinguish_types=distinguish_types, + ) + # central as part of the system + nlist = build_directional_neighbor_list( + ecoord[:, 3:6], + eatype[:, 1:2], + paddle.concat( + [ + ecoord[:, 0:3], + paddle.zeros( + [self.nf, 3], + dtype=dtype, + ).to(device=env.DEVICE), # placeholder + ecoord[:, 6:], + ], + axis=1, + ), + paddle.concat( + [ + eatype[:, 0:1], + -1 + * paddle.ones( + [self.nf, 1], + dtype="int64", + ).to(device=env.DEVICE), # placeholder + eatype[:, 2:], + ], + axis=1, + ), + self.rcut, + mysel, + distinguish_types=distinguish_types, + ) + np.testing.assert_allclose(nlist[0].numpy(), nlist[1].numpy()) + np.testing.assert_allclose(nlist[0].numpy(), nlist[2].numpy()) + np.testing.assert_allclose( + paddle.sort(nlist[0], descending=True, axis=-1)[ + :, : sum(self.nsel) + ].numpy(), + paddle.sort(nlist_full[0][1:2], descending=True, axis=-1).numpy(), + ) diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py new file mode 100644 index 0000000000..8482ca7ffe --- /dev/null +++ b/source/tests/pd/model/test_permutation.py @@ -0,0 +1,489 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import os +import unittest + +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) + +CUR_DIR = os.path.dirname(__file__) + +dtype = paddle.float64 +import numpy as np + +model_se_e2_a = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, +} + +model_dos = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + 
"sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + "type": "dos", + "numb_dos": 250, + }, + "data_stat_nbatch": 20, +} + +model_zbl = { + "type_map": ["O", "H", "B"], + "use_srtab": f"{CUR_DIR}/water/data/zbl_tab_potential/H2O_tab_potential.txt", + "smin_alpha": 0.1, + "sw_rmin": 0.2, + "sw_rmax": 4.0, + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, +} + + +model_spin = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, + "spin": { + "use_spin": [True, False, False], + "virtual_scale": [0.3140], + "_comment": " that's all", + }, +} + +model_dpa2 = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 2.0, + "nsel": 100, + "neuron": [2, 4, 8], + "axis_neuron": 4, + "activation_function": "tanh", + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 40, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": False, + "update_g1_has_conv": True, + "update_g1_has_grrg": True, + "update_g1_has_drrd": True, + 
"update_g1_has_attn": True, + "update_g2_has_g1g1": True, + "update_g2_has_attn": True, + "attn2_has_gate": True, + }, + "seed": 1, + "add_tebd_to_repinit_out": False, + }, + "fitting_net": { + "neuron": [24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + +model_dpa2tebd = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 0.5, + "nsel": 100, + "neuron": [2, 4, 8], + "axis_neuron": 4, + "activation_function": "tanh", + "three_body_sel": 40, + "three_body_rcut": 4.0, + "three_body_rcut_smth": 3.5, + "use_three_body": True, + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 40, + "nlayers": 6, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": False, + "update_g1_has_conv": True, + "update_g1_has_grrg": True, + "update_g1_has_drrd": True, + "update_g1_has_attn": False, + "update_g2_has_g1g1": False, + "update_g2_has_attn": True, + "update_style": "res_residual", + "update_residual": 0.01, + "update_residual_init": "norm", + "attn2_has_gate": True, + "use_sqrt_nnei": True, + "g1_out_conv": True, + "g1_out_mlp": True, + }, + "seed": 1, + "add_tebd_to_repinit_out": False, + }, + "fitting_net": { + "neuron": [24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + +model_dpa1 = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + + +model_hybrid = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "hybrid", + "list": [ + { + "type": "se_atten", + 
"sel": 120, + "rcut_smth": 0.5, + "rcut": 6.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 128, + "attn_layer": 0, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": True, + "temperature": 1.0, + "seed": 1, + }, + { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 2.0, + "nsel": 30, + "neuron": [2, 4, 8], + "axis_neuron": 4, + "activation_function": "tanh", + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 10, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": False, + "update_g1_has_conv": True, + "update_g1_has_grrg": True, + "update_g1_has_drrd": True, + "update_g1_has_attn": True, + "update_g2_has_g1g1": True, + "update_g2_has_attn": True, + "attn2_has_gate": True, + }, + "seed": 1, + "add_tebd_to_repinit_out": False, + }, + ], + }, + "fitting_net": { + "neuron": [240, 240, 240], + "resnet_dt": True, + "seed": 1, + "_comment": " that's all", + }, + "_comment": " that's all", +} + +model_property = { + "type_map": ["H", "C", "N", "O"], + "descriptor": { + "type": "se_e2_a", + "sel": [3, 3, 3, 3], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "type": "property", + "task_dim": 3, + "neuron": [24, 24, 24], + "resnet_dt": True, + "bias_method": "normal", + "intensive": True, + "seed": 1, + }, +} + + +class PermutationTest: + def test( + self, + ): + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype) + cell = (cell + cell.T) + 5.0 * paddle.eye(3) + coord = paddle.rand([natoms, 3], dtype=dtype) + spin = paddle.rand([natoms, 3], dtype=dtype) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32) + idx_perm = [1, 0, 4, 3, 2] + test_spin = getattr(self, "test_spin", 
False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + result_0 = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord[idx_perm].unsqueeze(0), + cell.unsqueeze(0), + atype[idx_perm], + spins=spin[idx_perm].unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + prec = 1e-10 + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key in ["force", "force_mag"]: + np.testing.assert_allclose( + ret0[key][idx_perm].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0[key], ret1[key], rtol=prec, atol=prec + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + +class TestEnergyModelSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestDOSModelSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dos) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = 
get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelDPA2(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybrid(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelHybrid(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +# class TestEnergyFoo(unittest.TestCase): +# def test(self): +# model_params = model_dpau +# self.model = EnergyModelDPAUni(model_params).to(env.DEVICE) + +# natoms = 5 +# cell = paddle.rand([3, 3], dtype=dtype) +# cell = (cell + cell.T) + 5. 
* paddle.eye(3) +# coord = paddle.rand([natoms, 3], dtype=dtype) +# coord = paddle.matmul(coord, cell) +# atype = paddle.to_tensor([0, 0, 0, 1, 1]) +# idx_perm = [1, 0, 4, 3, 2] +# ret0 = infer_model(self.model, coord, cell, atype, type_split=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_region.py b/source/tests/pd/model/test_region.py new file mode 100644 index 0000000000..7878e73cab --- /dev/null +++ b/source/tests/pd/model/test_region.py @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.preprocess import ( + Region3D, +) +from deepmd.pd.utils.region import ( + inter2phys, + to_face_distance, +) + +from ...seed import ( + GLOBAL_SEED, +) + +dtype = paddle.float64 + + +class TestRegion(unittest.TestCase): + def setUp(self): + self.cell = paddle.to_tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype, place="cpu" + ) + self.cell = self.cell.unsqueeze(0).unsqueeze(0) + self.cell = paddle.tile(self.cell, [4, 5, 1, 1]) + self.prec = 1e-8 + + def test_inter_to_phys(self): + generator = paddle.seed(GLOBAL_SEED) + inter = paddle.rand([4, 5, 3, 3], dtype=dtype).to(device="cpu") + phys = inter2phys(inter, self.cell) + for ii in range(4): + for jj in range(5): + expected_phys = paddle.matmul(inter[ii, jj], self.cell[ii, jj]) + np.testing.assert_allclose( + phys[ii, jj].numpy(), + expected_phys.numpy(), + rtol=self.prec, + atol=self.prec, + ) + + def test_to_face_dist(self): + cell0 = self.cell[0][0].numpy() + vol = np.linalg.det(cell0) + # area of surfaces xy, xz, yz + sxy = np.linalg.norm(np.cross(cell0[0], cell0[1])) + sxz = np.linalg.norm(np.cross(cell0[0], cell0[2])) + syz = np.linalg.norm(np.cross(cell0[1], cell0[2])) + # vol / area gives distance + dz = vol / sxy + dy = vol / sxz + dx = vol / syz + dists = to_face_distance(self.cell) + expected = paddle.to_tensor([dx, dy, 
dz], dtype=dists.dtype).to(device="cpu") + for ii in range(4): + for jj in range(5): + np.testing.assert_allclose( + dists[ii][jj].numpy(), + expected.numpy(), + rtol=self.prec, + atol=self.prec, + ) + + +class TestLegacyRegion(unittest.TestCase): + def setUp(self): + self.cell = paddle.to_tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype, place=env.DEVICE + ) + self.prec = 1e-6 + + def test_inter_to_phys(self): + generator = paddle.seed(GLOBAL_SEED) + inter = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + reg = Region3D(self.cell) + phys = reg.inter2phys(inter) + expected_phys = paddle.matmul(inter, self.cell) + np.testing.assert_allclose( + phys.numpy(), expected_phys.numpy(), rtol=self.prec, atol=self.prec + ) + + def test_inter_to_inter(self): + generator = paddle.seed(GLOBAL_SEED) + inter = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + reg = Region3D(self.cell) + new_inter = reg.phys2inter(reg.inter2phys(inter)) + np.testing.assert_allclose( + inter.numpy(), new_inter.numpy(), rtol=self.prec, atol=self.prec + ) + + def test_to_face_dist(self): + pass diff --git a/source/tests/pd/model/test_rot.py b/source/tests/pd/model/test_rot.py new file mode 100644 index 0000000000..4d59117560 --- /dev/null +++ b/source/tests/pd/model/test_rot.py @@ -0,0 +1,234 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dos, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, + model_spin, + model_zbl, +) + +dtype = paddle.float64 + + +class RotTest: + def test( + self, + ): + generator = paddle.seed(GLOBAL_SEED) + prec = 1e-9 + natoms = 5 + cell = 10.0 * paddle.eye(3, dtype=dtype).to(device=env.DEVICE) + coord = 2 * paddle.rand([natoms, 3], 
dtype=dtype).to(device=env.DEVICE) + spin = 2 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + shift = paddle.to_tensor([4, 4, 4], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32).to( + device=env.DEVICE + ) + from scipy.stats import ( + special_ortho_group, + ) + + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag"] + rmat = paddle.to_tensor(special_ortho_group.rvs(3), dtype=dtype).to( + device=env.DEVICE + ) + + # rotate only coord and shift to the center of cell + coord_rot = paddle.matmul(coord, rmat) + spin_rot = paddle.matmul(spin, rmat) + result_0 = eval_model( + self.model, + (coord + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + (coord_rot + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin_rot.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key in ["force", "force_mag"]: + np.testing.assert_allclose( + paddle.matmul(ret0[key], rmat).numpy(), + ret1[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + paddle.matmul( + rmat.T, paddle.matmul(ret0[key].reshape([3, 3]), rmat) + ).numpy(), + ret1[key].reshape([3, 3]).numpy(), + rtol=prec, + atol=prec, + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + # rotate coord and cell + paddle.seed(0) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) 
+ coord = paddle.matmul(coord, cell) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32).to( + device=env.DEVICE + ) + coord_rot = paddle.matmul(coord, rmat) + spin_rot = paddle.matmul(spin, rmat) + cell_rot = paddle.matmul(cell, rmat) + result_0 = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord_rot.unsqueeze(0), + cell_rot.unsqueeze(0), + atype, + spins=spin_rot.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key in ["force", "force_mag"]: + np.testing.assert_allclose( + paddle.matmul(ret0[key], rmat).numpy(), + ret1[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + paddle.matmul( + rmat.T, paddle.matmul(ret0[key].reshape([3, 3]), rmat) + ).numpy(), + ret1[key].reshape([3, 3]).numpy(), + rtol=prec, + atol=prec, + ) + + +class TestEnergyModelSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestDOSModelSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dos) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not 
implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelDPA2(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybrid(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelHybrid(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_rotation.py b/source/tests/pd/model/test_rotation.py new file mode 100644 index 0000000000..94e3442631 --- /dev/null +++ b/source/tests/pd/model/test_rotation.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json 
+import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import numpy as np +import paddle +from scipy.stats import ( + special_ortho_group, +) + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.utils.data import ( + DeepmdData, +) + + +class CheckSymmetry(DeepmdData): + def __init__( + self, + sys_path: str, + type_map: Optional[list[str]] = None, + ): + super().__init__(sys_path=sys_path, type_map=type_map) + self.add("energy", 1, atomic=False, must=False, high_prec=True) + self.add("force", 3, atomic=True, must=False, high_prec=False) + self.add("virial", 9, atomic=False, must=False, high_prec=False) + + def get_rotation(self, index, rotation_matrix): + for i in range( + 0, len(self.dirs) + 1 + ): # note: if different sets can be merged, prefix sum is unused to calculate + if index < self.prefix_sum[i]: + break + frames = self._load_set(self.dirs[i - 1]) + frames["coord"] = np.dot( + rotation_matrix, frames["coord"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frames["box"] = np.dot( + rotation_matrix, frames["box"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frames["force"] = np.dot( + rotation_matrix, frames["force"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frame = self._get_subdata(frames, index - self.prefix_sum[i - 1]) + frame = self.reformat_data_torch(frame) + return frame + + +def get_data(batch): + inputs = {} + for key in ["coord", "atype", "box"]: + inputs[key] = paddle.to_tensor(batch[key]).to(device=env.DEVICE) + inputs[key] = inputs[key].unsqueeze(0).to(env.DEVICE) + return inputs + + +class TestRotation(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = 
data_file + self.rotation = special_ortho_group.rvs(3) + device = paddle.get_device() + paddle.set_device("cpu") + self.get_dataset(0) + paddle.set_device(device) + self.get_model() + + def get_model(self): + self.model = get_model(self.config["model"]).to(env.DEVICE) + + def get_dataset(self, system_index=0, batch_index=0): + systems = self.config["training"]["training_data"]["systems"] + type_map = self.config["model"]["type_map"] + dpdatasystem = CheckSymmetry(sys_path=systems[system_index], type_map=type_map) + self.origin_batch = dpdatasystem.get_item_paddle(batch_index) + self.rotated_batch = dpdatasystem.get_rotation(batch_index, self.rotation) + + def test_rotation(self): + result1 = self.model(**get_data(self.origin_batch)) + result2 = self.model(**get_data(self.rotated_batch)) + rotation = paddle.to_tensor(self.rotation).to(env.DEVICE) + np.testing.assert_allclose(result1["energy"].numpy(), result2["energy"].numpy()) + if "force" in result1: + np.testing.assert_allclose( + result2["force"][0].numpy(), + paddle.matmul(rotation, result1["force"][0].T).T.numpy(), + ) + if "virial" in result1: + np.testing.assert_allclose( + result2["virial"][0].view([3, 3]).numpy(), + paddle.matmul( + paddle.matmul(rotation, result1["virial"][0].view([3, 3]).T), + rotation.T, + ).numpy(), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_saveload_se_e2_a.py b/source/tests/pd/model/test_saveload_se_e2_a.py new file mode 100644 index 0000000000..c1c2ba2cdd --- /dev/null +++ b/source/tests/pd/model/test_saveload_se_e2_a.py @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle +from paddle.io import ( + DataLoader, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils 
import ( + env, +) +from deepmd.pd.utils.dataloader import ( + BufferedIterator, + DpLoaderSet, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.tf.common import ( + expand_sys_str, +) + + +def get_dataset(config): + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + sel = model_config["descriptor"]["sel"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + batch_size = config["training"]["training_data"]["batch_size"] + type_map = model_config["type_map"] + + dataset = DpLoaderSet(systems, batch_size, type_map) + data_stat_nbatch = model_config.get("data_stat_nbatch", 10) + sampled = make_stat_input(dataset.systems, dataset.dataloaders, data_stat_nbatch) + return dataset, sampled + + +class TestSaveLoadSeA(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_e2_a.json") + with open(input_json) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["loss"]["starter_learning_rate"] = self.config["learning_rate"][ + "start_lr" + ] + self.dataset, self.sampled = get_dataset(self.config) + self.training_dataloader = DataLoader( + self.dataset, + batch_sampler=paddle.io.BatchSampler( + sampler=paddle.io.RandomSampler(self.dataset), + drop_last=False, + ), + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + collate_fn=lambda batch: batch[0], + ) + device = paddle.get_device() + paddle.set_device("cpu") + self.training_data = BufferedIterator(iter(self.training_dataloader)) + paddle.set_device(device) + self.loss = EnergyStdLoss(**self.config["loss"]) + self.cur_lr = 1 + self.task_key = "Default" + self.input_dict, self.label_dict = self.get_data() + self.start_lr = 
self.config["learning_rate"]["start_lr"] + + def get_model_result(self, read=False, model_file="tmp_model.pd"): + wrapper = self.create_wrapper() + optimizer = paddle.optimizer.Adam( + learning_rate=self.start_lr, parameters=wrapper.parameters() + ) + optimizer.clear_grad() + if read: + wrapper.set_state_dict(paddle.load(model_file)) + os.remove(model_file) + else: + paddle.save(wrapper.state_dict(), model_file) + result = wrapper( + **self.input_dict, + cur_lr=self.cur_lr, + label=self.label_dict, + task_key=self.task_key, + )[0] + return result + + def create_wrapper(self): + model_config = copy.deepcopy(self.config["model"]) + model = get_model(model_config).to(env.DEVICE) + return ModelWrapper(model, self.loss) + + def get_data(self): + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator(iter(self.training_dataloader)) + batch_data = next(iter(self.training_data)) + input_dict = {} + for item in ["coord", "atype", "box"]: + if item in batch_data: + input_dict[item] = batch_data[item].to(env.DEVICE) + else: + input_dict[item] = None + label_dict = {} + for item in ["energy", "force", "virial"]: + if item in batch_data: + label_dict[item] = batch_data[item].to(env.DEVICE) + return input_dict, label_dict + + def test_saveload(self): + result1 = self.get_model_result() + result2 = self.get_model_result(read=True) + for item in result1: + np.testing.assert_allclose(result1[item].numpy(), result2[item].numpy()) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_se_e2_a.py b/source/tests/pd/model/test_se_e2_a.py new file mode 100644 index 0000000000..b1e6abe5ae --- /dev/null +++ b/source/tests/pd/model/test_se_e2_a.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor import DescrptSeA 
as DPDescrptSeA +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +# to be merged with the tf test case +class TestDescrptSeA(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec, em in itertools.product( + [False, True], + ["float64", "float32"], + [[], [[0, 1]], [[1, 1]]], + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # sea new impl + dd0 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + exclude_types=em, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + dd0.sea.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.sea.dstd = paddle.to_tensor(dstd, dtype=dtype).to(device=env.DEVICE) + rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + # serialization + dd1 = DescrptSeA.deserialize(dd0.serialize()) + rd1, gr1, _, _, sw1 = dd1( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + 
err_msg=err_msg, + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy()[0][self.perm[: self.nloc]], + rd0.detach().cpu().numpy()[1], + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + # dp impl + dd2 = DPDescrptSeA.deserialize(dd0.serialize()) + rd2, gr2, _, _, sw2 = dd2.call( + self.coord_ext, + self.atype_ext, + self.nlist, + ) + for aa, bb in zip([rd1, gr1, sw1], [rd2, gr2, sw2]): + np.testing.assert_allclose( + aa.detach().cpu().numpy(), + bb, + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + + def test_jit( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec in itertools.product( + [False, True], + ["float64", "float32"], + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # sea new impl + dd0 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + seed=GLOBAL_SEED, + ) + dd0.sea.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.sea.dstd = paddle.to_tensor(dstd, dtype=dtype).to(device=env.DEVICE) + dd1 = DescrptSeA.deserialize(dd0.serialize()) + model = paddle.jit.to_static(dd0) + model = paddle.jit.to_static(dd1) diff --git a/source/tests/pd/model/test_trans.py b/source/tests/pd/model/test_trans.py new file mode 100644 index 0000000000..f69d2f5b83 --- /dev/null +++ b/source/tests/pd/model/test_trans.py @@ -0,0 +1,168 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dos, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, + model_spin, + model_zbl, +) + +dtype = 
paddle.float64 + + +class TransTest: + def test( + self, + ): + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord = paddle.matmul(coord, cell) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32).to( + device=env.DEVICE + ) + shift = (paddle.rand([3], dtype=dtype).to(device=env.DEVICE) - 0.5) * 2.0 + coord_s = paddle.matmul( + paddle.remainder( + paddle.matmul(coord + shift, paddle.linalg.inv(cell)), paddle.ones([]) + ), + cell, + ) + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + result_0 = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord_s.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + prec = 1e-7 + for key in test_keys: + if key in ["energy", "force", "force_mag"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + +class TestEnergyModelSeA(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestDOSModelSeA(unittest.TestCase, 
TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dos) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelDPA2(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybrid(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelHybrid(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, TransTest): + def setUp(self): + model_params = 
copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/water/data/data_0/set.000/box.npy b/source/tests/pd/model/water/data/data_0/set.000/box.npy new file mode 100644 index 0000000000000000000000000000000000000000..6ad2de625b40040a2d13248dd8b197a0f885bdc0 GIT binary patch literal 3008 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= zXCxM+0{I#i2099snmP)#3giN=P+50J1|)!uk4+3o3j;`gR1G3Tu!RLSF@z3=(J&lM zhw$WpEv;Y^gKGdXK=Pw%5FvssEU<|obc}}KX!syf1GcchCWg>4ntn#Z2ay`Eg#|V- MgpSelGZe!I08k$V_W%F@ literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/data_0/set.000/coord.npy b/source/tests/pd/model/water/data/data_0/set.000/coord.npy new file mode 100644 index 0000000000000000000000000000000000000000..8bd448b1254784551c11c2c238af183a8dc0a4f3 GIT binary patch literal 184448 zcmbT7_g~Hb|Nq;24-G99MN(;LoyYAS8d5S+nH91#GP0#XS}283QYwlvO4NBiZIVc4 z5hV#(8Rez;p7-ZZ_@1B6IX|7tc|0GF$NhG@U2hoyLH>cE;$rK>Hd)MH5It|Tg{z5$ zd!&QKNE3_56|2{*4v(6BWJy@ThOqoMYqw$6j$9Fbao~V zx+&1&ZXc>O{({6^dNj=q^y*n0(jHADR$PEo)glCLt;URdhS+22OS+!2biw;Q!W928 z2l)$V&oCybzCvDw{eYURB`@4H91ViUcxCsSz4>WGyTt7&>S8OV>Z(yoS`B907sBYI z5xq|^p)dUpIF0A#+^eor$Wk3li7T&h?=M8qMHO-MX+FVAvlKRRh#ob2??az=3V7<_ z<=s|bqJk#9v6H0xjUAkT`qA7+ynk;q92Od2?j}a%s$Mj8WfQmE@(wP(QlRgSM_3NkEJl%COrf^A52PSvLyW7%I2x&U@%jr35KVTJ$V#q$}5qAjVgvX zji&FnBgxj?jaH5Ef>8G~tlZ4$!krzMS>Z$JpSIJXZC;e>(T&Sf^KeW>hz~ZexOLSL zv^Vc0oW3ujh=B@xpZx_3ZPaP|`0o(xdH|akdBpbVl3KqKSx?+U3R};zo&s_D-timD zbA}1S?>J(#*#eq+TancLQlZ}?TJr&Ys`${tS^0Icp^?QnGt`{Bx;qgoJ`EvFp*>sT zI+HqgfUYd+gyUQXddP=xWqT)6PNM<;ac2=m4Vl2wmMO5*K^{~xG9OU&KY%HfwXyv8U^uxu~cA=_^n3}Fo_iIvZXVU?{u&1^EG zEz6(d#56rJ`BjaWg#|E}GL)A045K^4TxnU1D~H<$U~6GP&)-^8?v-HjkUYsIuV})% z<)v)ReoN}$%OJTk3Hxre^S)zapVr5 zY6>~*vBra+_dQxtZgH#3wsQtMUm@tZDW_0<2HyD|v}p866fSfmrPt{srW#KZKNs<< z)-K1Q)K3`5NTexphjCBbgZ{3+jQuhINVR5@yekXB%wg5t$U#5(8rb^(7^-~ 
ze~LIPfkmo@I5IQ^pTzp%^U8+KtSp6-&vE8tHCoWNZ#Jpp8}o0pATzg@&{00k+`eaH z?+ytv`*#rear$)HMV|)RLrGf2oqVorpx-mC=#cvu#IDSP@>c`ezGeghZ#Yu<{%D$K zAfQ*P-lF6A0{HDL#pE!^ zE;}%y+1KM##d(Nr8&1ZoQZVLwCbR6EO2I27>DD?U(lP}X;5(Bpwk+eD46cAXww}$} z?a6``Pov+hnb3-Hr2NpI$ku3ORendXWkQ#5!+BjQ5v{YkmnyAt^r98P-;i)zm3B<> zr#YJ^`o`Xi_;*TJ{raDs}^{ zIwwg}mq<`Yni;&6$5E)kENtI+4yCOUbm098gdGnjk9V6X=7k*XHolM1ld8C_uQRbY zFo8eRyPN6zdQo?*S8d0DGn0801DV0`xbHj}gI2|`<3)wSvqv6!^bQfH2PIpDHEZwV z`N&apw5%2*W>`?Q)j@h+y^*5bbNJ`cR@k~zk?xrsL*%zY*rraVsaroG&{&g-Qr;kQ z%Q}ix@`b`tJsOjrf~-y($mcFc|APhOepd~;3p}v9<}~`=e8kj$#?*Eo8xu;7c_?29 zqUm{Ztatr5SU8$fK&%lJn7OcLr9r5_)c~cE=UBSIjAm#3#K?OoO!b2}DX;Y-u3VGQ zo(9R5YBUV=z}+MpLlXu#(wa;Ef|Y2%=q>aMMq#$;ZkRsxW^uWW~u&N{fyrox}Q(PuO~C8R^O#V@f@_AxZH(UzyOB>nfZvo`?SB&Pn z{zj#-GRsPHpo^~=UcWA2t3S;`u>3A8To2ml{|e_WE79Zga^ze7lSu?bV(3OGrWWf$ z5or@}Z})4_y*q_GCsksms|{^y9YY7UM&Yl$KG_vqM8~_u$l6Evr@5ZO2XOJq~4!iSgnjj<3H|RFADOne>%x4%n>`mzF0cTn>rWD~m!zfI* z6p>N3xG!f&H=ls?5;6pX7e5kMZjML64AK0xrm@y5z=PsduCvYSKcjccbZnnsK;=&g zpkKHa3viknwa^8t7wFQ)XR7qjQ#6xEYS3^6ZC=g(1AZK?744B3lr_PFCcJyd{p#$+ z$_d?~Gm~?_dgv^wnj~nZ9$no+|%e3C9f{p1J=pRGc5L4(QcKq^HX@*%mci}`Sut9Z9aiN0n? 
z!(O!(y6Z<$l7bZdvsb3Udu1p#EQnSng=0&Y89AtBqI8lh7Oy&k-0zI6tCX>R_e9~x zwm4Mf+(a&^l2X-2_yp@?>8kxKIw?uG<;iPSQGFkS%Z=%?=h)i%VH=>f@d?I#`Hm$I ztVrh37hEX($y`<_P~74Gx|MB8nzFuLj^02+=5x4zxeqcM-{NeA3O$cIjSw|?EEf!=yBQXAsC6qj z8#b}ay~8Ny;WZ>?s|i;uiihW?5wtG;4fcj7pssf?3RKO=z5E4h?+s+<_sY}Uqqo`D z71c1Ul%sjiF0%4LvuID6CkajEXz&*WG8u6}V5aLzuUA zBeBK}yU07dn0h{)W!>#Tb?n|c&IgL2Q$)C}} zZvARp7!nGpaf9i~jN#<@Fdu8Rtf{Lo1MU)&uxrS0ipU~*x+jQ!l_@ddk{G<&FGH>C zyy%PQoOB(0lf4_&1g**$IPGOl!yS{cR4W1-65ny3o_S%pX#SD8t46bP&T@ah$q?xb z=cj13K-S<9q^3-zh3_3`%%78-RQO92R)0ip*8{;CnFhG+?8HT#3{Kf}C3&Piz#bg~ z(mnP>SbIW|`|)rjHp?6qJ;&N~rgtU8{7p&uMG!eP$g0^TeXyA|E3^)%YG)c&Y1P(4{!$ck`&Y9!{zQQK`pbRt6CD2 zE@w<~vAZaBp9jfn`|v$aFXPrOT~ZN?qLY!=(COw%_jBdwkw`=SC;tt9r_G~{gBh~o z%&C9JVYux&%#d~&2ZY{~cIO6rb0kafWLgANByPY-(SWk*-Xn<%#@UBa?CEkxO3%8> ze8Sq{ZKp+V zP?M(|4K*yq7L)Nz@%L!@*=a@Y*~4JBVIKYXtVzG7T)>Lv8bSS6Ti6|Spj$bT)V41l zIVtzq?OO(X!CM!BnWGjb`|&?`7uK@p)fp&U(1Ud*mssnAO>Ft6skEW68!P=k<4sjD zOZ??Zn`XFpcOewb;gK@Z^l?lT6ZhJQiKl&qfBhY(^nw}g ze`!MLhgl>oRtKcH(b_vMv^gamyQ)-a#fn;7cUg-;f&=)bodK79BS`D@6>PM_{fF6iq13Gv$LI_7^w8YQ#7 za{HxQ5Z)bA%MaYgEX^UbW3?Q`dkvxF-efAvnoiZv!JoTy3HBq!=)a(cY^F%7?R{!R z8k{t7hJV0Km!~UM{xrj3BA#!xA?IPIvEhp@3S;t6XW&2=%v)GohZ@I?K8D`L`#5z# zi?*B8!=O$P8}#PTi|KDXzGzxPMz+c zWX=!f&YL+y;oMAedM!iyo(999C7Es27|-i}TF&V&)j(tHL0q?&5>^HjV$-0ZbZksM z+xGM*yW8nYlfuMl?x+zIdCH1YToy<_4*BsiJ!Obdj$y0*#Ibs*09vvWN?Z{cKN7g?jsQJDEFCX#~JKT)<@CQ$j=PQpJ z#@@8ApONXki=1NV4ZP@8r43i}ge7aVD5ya68`}JNrSV(oi%c(UUVKEG{ZIA`Hr#(D zs!09T1lwD36t-s!R>Ul%8GH;m70b}t>PkG$ljg%hGZ6M?AHQx^FpG?LrFm16SVp5S zWq#d*sP_qUx=$WQ-byozgD&L#EZ;*u`Zac+(G`T6-9n`%QDOWZnA{&q`D-jcm0x16F6{tj;le26iYLnuq|L|O>78+6U*Lg77qX4DD zwwQZpD!EyctLhs2NXK!1N32C_>q&de6_{yq@gWD)x`rCzTAXn92!cK^LD+^uNx@hScHd9X+bw+$E5@JBUWxs?o-B z4gT1>O|);ya|my}!kZ{@oTWY7ej9fjJl>8Gj{S(7;D*E9tEnVp2bGPLBafB&80qK0 zTUW+ose2sXJ*JiwymX+1igxBPW;!)T=i+JJYDBCRXHq74RJZaF`f=dZxhW*FWgZwk7|f6WF+M zI~z4`zu>D$2x-6j#nSg%k;nB`(SLY|bxEb-@lII^bV$MU&>^(t=rFQbHd*}1k+9L$pgX4Hs6nd-qem#v9`z|?(>@Ko%LB;pMgerY(y>9h0qMqS zaP^oZ=`Kf+o`3WGLJq1j4a 
zH21a|Z(e%|#&fl(x;~j(7GOZL9F1tzr3HLXQVg{^{KErfapL_52v!rINK-tp_ILq&$)b&HM@eWu3r^Auaak@p07cB z&k{`NbVk&K3G9m6E}_@$6oIDUZ=msouzcrzr1MU6HU9+mJ+-780okPIw2mfbAK|SM zjUn!$NmGX8V(+|T2oo|guRofJU9=s!^Ju5 zPZ6rxDr&#fR^XU~CFMGYvhUkpGUNJ5^sGXH6myN}r`uV9;hHJ5DaW1vFu4%PgA)WJ z6DP9#MSgVbW*)}xvY{cRU!f>3h0v(&@ZU9$+tR8>5x*9pxl@_!CW<6v-e=T{&eVxR z1=KCI01^M@Q2(Qw=+el;<6|{2AFVF(*Y&0Wjvg&y%|R3QiWWw-#wxI z?@MrQSEOTQD+O}5LTI+StSCdQ#|si?cFBXuYD+p^4rD{gqL=@7ZW#tTwWu*pf=-9@ zbEl?AQ=hJYPblfa4uetnG<`A^o%f*(k4s#%NTa+D*Q5*EGKKqe^r&~5Dh->ylHYBz zktB|MM)1d%;7eaK<;_z$7wJLBA6o_86&f^jwFhp+Zlu$}apdbRO&M9I;rLLImy_Lu zxTb@=ct9*O9X*OnXO3k09)9%2Y#+SzchZSD3!dV$Js+TUt;ZsAI2fvB6 z+}2?i&b?&;xt27@s|QYVkFas4^6^!4&b^v-5lM2EByK26>o40NIn9-(xzDEj7H2wE zwGrR89)-v95p=tGCq^9`O`(V4>FqrMJu>)#h2L`Vp`;oukrup__$>O^as=L!m(q$e z*Kxi0HI8WMkXZF6n5A&o{NOA5)1XAvi>0ZarO=Y7r_7~Xivomy;5JH6Xs)7#1K<2e zxn71g&EAElW$RdDwGw@trp#a4r;1T2S?E=o&D}kijo4;0`chINsG1o}Wp}-(;$0t> z9kLR6Qh`jbV>)eltj%xUTZ5!wdp&Ml8p4h%5uNg>M1;Bc zXQoES5*H(Jt2Dj4;ZMnrB&kMpCQlF?5k8$e9q+G(lfFwaUd=fKZt6L7Sq7lvfG-8e z44_!*1s2UULEp?0gsc>4+~)`QWk*F?{JtMQ>*hyx@4rq+@H+S>^7#MkHK&({-#{l# zm-2pHK{m3+>o@E!k#G{7oOn2%~ z+8T8dFRBlt^?D~Subu*|Ri=Zx6zF+vF=ycU8#byl`BnBUB3&E_(|ePsH)ISAnBL)* z%f5y67af|asLgaWMOlBeGL0y)=g%v}P{ihkc+v6%MVP{AI=SW>yhRc7`)kQUEBf#+T+bmB@7 z-ikdGmTQ!85>Gjf+xHW3r95|jP!XotjiaV}*RZwFk*-cop$Efu(A6V_yklh;jH13k zw=A3fcAv$sL{F-A`GnwE8WiF1A5>0mBM&YF75_}hGCUb|JNsD2!(j+mKZDY>?D5J= z0gn_?A?ExOe@aKu&@;6-Se(tij8kDPDnYdPOeRDUWc4B#|DQS#Lm24SZ+Il z3V|-^UDKlEEpsU|kkO(G(RA&sHTlOZL|gG;{F^_5elE5`v)3@{+p~^_{&Jv?j;-hs z`HY`dm*HW$HUFZ+o7lHXXg-Lh^i3@oP`rTSZCbQYMUk%chz^DGPX9a$>kfrb@}5ByJao3mQ6`f7 z%vg8aZAe}>rVSC@f^E*HnbWsvbc?Byv!Xs-bu<;um_3I!+?&Juo0mdu?`nG|@FbBq&q z%7&8Eh{qW8C;`ef^{AY>?eMaM7b(+VF;D%haT!c95+T_8`B z-xUeQz4c|C3G3<1_g82Nt@MyvqfW6_Ymt2A1h%|r=QYfiLGrsP9W0ioEPogN-$OC# z@$}}kRR=IK${vPSXH(D!KkDDD$_r)hVcC6sQdJM)&dfEZ*j&+j`2_w%!YVr6^B)4n ze#6HKJ(Oh%D6e=Nf*OuvtFq{sxDt(vja%uDb{x$eD>^6ouS4QP8z&@jrij zU=a&O)4!fk0;3r-nWoKd94}dpz|tkq3JDdsZN2L;s;59$k`7Y)ewQ 
z_aP_BvqP8cr-p4YH2y{bA2)sk<|ir944HDw{auJ#J<}*-M?WmCs8iO)``{uYDLx<& z8nevluiajpPIN?g+8XTowwy+o2IJ~{C!G9u3W-l2z`e$T%G}Rl#N_?Lju}DJ7xuR{ z-N^t)M0eHC8*;Sx#s`-AEfC{fo3VCH8@z5P&=faGS~+`w&6=e{FV99$roA2oi>F|w zSvD3Oeu>DryK#bR;I1B-LDDyr=BoVi1=}cQ z`!!Venm#XkZMw-l3Z?A{Xs$X!oND`J=R>JzR26cJcvg|)^*yZ}k6m+x?T88}~ zqhgkoLv(q-1b0G(Ed62}tg{kfaYR7nRS&UfvmSLM|Hj-^7RXH4g5K6DL0L2CM`a8W zlv)t@AebsUZ(_HP6P;~xqT@0rp*35RCU;-P_qP)uxOxzehi9QH*odYCUWdNu2H z!)8w&rnD``{b%}|_;^=J6o1TBSeCP0I@e)5`wY`vWl5#q-Dy(G6|hz<`Z?`5vb#MY z`B#|=nmsAjy`K5Jjjb)8laKZYd8)P}&fu{-CAd|w-${*-(B6hjkuNl=dlUBkSdQ}O zbpDjNFX~QckjW_x%2sOQrln|7wboy5i)0;?MLwWIZ73Z{A4gNRlyR;{|DtNkUwm9K zopA#%@Tju~mBUQ<^%^UvH}M6=tayWI?J1m3ekyk*G72;^Nu{S9SY@2!r#xYi8v}pTJl_xYB+Vu+nr1~wmwv%k;>2fmkpzJJ$Mnc9dEuJMf!1P zQI07=^M>e>Y=SB!ThF7f7goS`lxQy3+JpI`{B+HSN_ct%(rHLRY+jjQ$%|bWd+QOx zCTo(yup2Om(7^#Z&sNk`31=!1q%~VmVW>po=OnPoKua{xOU!X<#z;}dV>IkLEMo02 zVT&$hte#C%;w(tDG94L<tlMIDNwG{9C`Q20_4()zK5?${n;19L6tT3jdg z`{oNhbj~0lax`rW`-+s>3AnvM2BmUCX?R#ZOF1vb7BtC`*TlWGIDQA$wKXYkUnQHq zCzLiV8&A9E{ekprX)=EA$y8PRN$U1g$n}3kE21v zuw<1Frw{Hz^b|b9`~44aX8vKc47SJG^LpIsePgMAX$W_G&04lp;VH@^CD6CUj0QAq zD80B6Dzk@BX~01kY@drF6GO7{Vf05bi~`$~JRDc-$D&3ZQkXKHNNqG(yiH?;Z6Z!i zDHSm@hmy75CiMRL-#V`5>kcgfs$^(cv98G5E#Yd{sSqz|&%^l~e*V0G`i5ZI-04cn zw2dp6-i15&e_@ngGMlyU54Od=!lPsze!9#i(sZ~D>FvMJFJZ>5-4o269$13vA4OPy z$dG1)NuzbF2Q}PUNZEbLFk93n}f(|tMB0ukTwQFI2%4M40;j}n{`Xr<*insh#zpMI_k zy{T{Es}x6nY|mm={8*Z7D@G-Sa&+8KmHNgmq1WO*Fi;yp`s0(pO5HGjK?C;5OrsM! 
zjPTLs5KCW|0^8D?SSsS*PAb$uEMN-Cb{%IH-fpztKn>lEAF*UeKc<_gBXPbpoa#?^b9g>x2rP_3yOy!JjuSY9Y~Yd*&x&XY7xxsg6Q zfr*V$lzjUPk_}g4pG!PuJ;+6HyD9BWdI6blXR%=6U6_ir?3}ty7_eH+l_hvm>jV=z zsydb>DmB2W$$~MV0qNI|rBxpuV4=J$#bzAFoSE|=aZ{RxpK_w!F$*bPJcO-l%0p|j z1o`!M3+JU)3Xi2OBe8c~@GIDhU9P&+sB;MA1MvvplX&TyYp}uAkR}{bp?<$hT;X+7 z5l7a}8O?r)k48nPJL^wHPbZR_vlO4H{uKug{=&^)k62=LJwnz&K<$cNf<@vXbk*E#pA#qN2=^m!hvC&JYSYL^X-TvV0nq?l=EB--JelNdg zY${@#2Ggz9>1-rk5)8&R37l9oD0>!J>68 z@`cZNN7{L`3_BBLa4Gj3;)hKlFOz*NdW|AmEI5Jx<=??WRLFX1DO@&+xXH<3^fyF_ z8@C}Ek6gP&v!f|JRsH0?N-7-h_ufJ4;OmIC96|rQdJ!Yx%+4mLP}-kybSG1j&do?h z?CQ&qS@jC7Z+2nPR|Wp?{vdjM;41>vK4VXVGlWSZ=0Q1+=FVGS&6-VVXMByn2%q?^6pSb?xx;uLx8F&n2*!LmP0qokr9{8%7M zmSYXs{7r7OsK6e(Dq9(MXD8k&XQQOulP=A^3Guu^q%}{6Rw$2#LFXo{k)J2Hrs_#2 z4gI0|;jx3Q_zksdM^n%liyw2#!NyZr=wk``dZlnQwMPQjuUQ<9Yv;#kjb z?0q6eF>5(Uio4-tL=Lwi--{y2fakrW@Y?zsG@IlhUSdF9oHZTZauGGsBkASz3uyXk zhrn+`D9d#e8CZ?*7-Od^kd)hrBb&u&nt~#i_i{WP8F`-hySzq2ekf{8bg5mG)fD?K zgSXZN?#Dy{f=hL%`r#n5QIhA!E6bBGN1Bf?=)vCfgSb#Rk!(+Rl9_x9r%V4~Z1#6} z8W#(Si)*n+QQ#BmKJ8fWlUm_N3edtNjeZIN@qQgmrX+H}MX9!7DG z=hNpTEn1`e6gHpE2%0p+sbaPT|5PdgT9=GT(`z^kx~Eb1Rp93NRdhW&3-7v*vy$e8 zG?wq?avQ%weaU%&*y3tPL|M_piJDa3XF-*xl1OrnCvkNfc!|7rg%xKmFIZT{5m~E0*B^R$dY~}DfSfeCj zl+!n`n)sNkvOcvCLh9e(^r^F`EZ1b)b^-o!p}>+u$iSh-R9LJm8Q1 zD9*1&c4rZ$&#|H7;ib5g^&9-g{}8j{G7gUHXJ3bPaGNeO`WN?>iaz*`R zO&XhJN^re^Kbu92Q+YNVx4J=mzBQe+cB0nb0hB6sM<8Rp9e+ltQi0P1T0Y=T8@9h; z`So>BKRg49d(DVq;?XcE0D}iLaaRKaFwaAt()`rvr{4jtHC)7Xq-ycoM!&@<(-Q3M zVpR8S4B=E8_oeMMjE4Nc?Xxz_WJoi5Uv@yB4dDh$ttPF~PMr9rPjBz|a9dMzIGK!5 zSg|V^H^0b}_Twoywtom+beus8Z)(xL2GK0G-(Ry;%P4EwZCF2aqI+XHa4cAkmhb4o!$aYuxx^accIIT2k_#<*#X8R1 zz_v)xz7ujdsp!XUH$~%GWF02l*P<`>FX7S^fLT$^g8R1|NCSVF+uD1`E7Bs*Z;rw} zugBv2w>S77?;;&!O5A{`N6|f*EX54yl_;az*snqd8$)o2uY!K>H;i_lgI->nqDIUl zz3i_Tv_Nz&b(-RqMvy4eyX#?BF`juVikcrf)R!(##g#h95@f>T#8aW_OHiA?J^m|f!K#cPsvKy=qYI!% zQf`#EM_-*uc-HzbP#9A3?|LqLHpRjwOOH#??~rMeMY z?U9QI+P`q^W)e5X$kapZ%VPSp{0T-r+XjmaW$Fw(0A(%%Mdz+@>y|9R)D#nPovcdz 
z|0(eOp1O3vL543gkfV27_M_stH?5i%K-?Bp{*~A_R4)38v5R9_gWg+&LHl?hQgJ98}+VBnTuuc>}uW}paZMw%C=MBfv@(!-|;UlC>6c>1p zxrPyBNW&XNnYFtOg{+S!)pS2Pq8rBFoqreOB2{Qk&26S^P>7Dm;ba~14xfMh!Jbfg z3ZCmv*G+s8*l$Pf=TD*Wix;-`RbzCY9Vt9~#ir*!77h}5!_6z-V}8CoMZdd(BTllg z)1N{9)3t@-V<%!zX+2hn*i%!rY$nmZ3tBJQaA|cd?1v7fT@!y|u+dYN&}B&8Rgp9< z+l-p#r(%O$2`W~}(!=?Q5Nwy>O_zm};UaYk5mP7WbZ;2FxQb8tvjnc1<7kj#A`+ji zryDyjL*v|QWQ%hDHI-VF;B!&bzqrlL2+T-M%mv$aX`-foFolk3#iA?e!rc~2 z(D`W`)%+865<=%dy&;$lT``K!nEu2={DU+%HmCsdiMhh|@FIL|)}g)4F)a6f2b-2R zgN7{oiC_Pu$U&z=5EJJ~fkG=Tq+b^5%hK4YUcg*w3VmqFhyM&?YWg5ab+TWXt)m}~ z6h{cWZ@AJL3c>hGA8_*NB$|KXx@b;xpdNJx`jU~4GgItn{_nH+Z|xG`V>54X-Ld>r=x)pqqT%-|7sw2u4p%z-imihNO>HB(Be!oTqbSQ9;x&ZIfh)Xp08 zt{+Bc8&5#%@@_b)>QM7f8yXgOmR)I0U=tff&!&iFUASM4%hvLs#q}@Ql;I~)q3i*5 zBT?41v=BQlrXXY85x3b`g3+Zs@Y_!~2 zN80OcMAAH$$?pp%`ObBm-_l>W6H_VF9a4|FA}gw?lBCojTJ&M#Zd&*&oIYzL@s9o5 z@o<|Yjruzsk?Zy#&V3qvO6f+svMeRV%h2wqFzOoEg-Vf!s1}}tbWx;?dFhxE4kJv#n|Y0TdyOv>>?j_-RU#||bLbFj~U({Xb0 z5B%MJ4TZ*{8E?}Q6c~PETY?R#z$b!!z8Xy%W~JgI&!c0Mhz$##2gmvLH2HowRb=Z@ zM2tSU$}h!byR(?zaYf+$cPbUCC1Hqa9Jy7UhR&zg= zAy#oZB{Wpy?F}o+cp^bNs#2U<5Qj6#3)6n|co(z}0NBEy6EGz1O`#=k3c{jrRqo@bB%ZVIBRd}A8k*QbPuq=`{(f2U=hB36KUPM1^D5&5+eq(_+u_{m~&2<)~e`}&FC-O zbWyetIc^R=)Vdg!$w~NLI*yji6L~1ZuW%K;9oXfmK|fk=ih2hBLH7I)%)Ttg&uWRL z3Dk{|=h_gw+?-AK8|Lxc%Nfq{=a4&5jXvkRW^xmwXkFt9lI_zYf#waUH9z&Zvg0A* zHYM^#xdY5H)smhk?_<9l<`Ny>jmdiJ$YnqmEl6X5lAg?bQ8jlV_CI(?9pe7p=h0hd zO?gTm;1#Y$(!M*WM=yd_ZcXDSD*NL@i~&!saj=Op&5x*i{VZP9w@|#6m<{o8y4!y{h zs5_DC40pVo-;1nR36j>PsH5ZE&MI=%4bkzl@;AyzMN*+o6&mt zxwwAj08XFzh2YUKNZL48KGj9+knvxJg*`kCE4#!zeS4q3q0N z1igEWpWmgZW~?@`qepOaO#*WmH@2a%_v9wts&K;Mt5kRae5nj3UD;kYy`x-*Fq z#8qie@m!27tQ92OT+DwR|G4&uh*b}Luoo}3%6lwLD8bBlV`|)R-$QD09Lrldk4Dyf zg4IHmp z@PMW(eQp_s*B1QxQ6EvZK$|ehRjY)e&Ih4(%xu9ahrvLl@4cdZkv!vI+)U}G65~0{1EBA z_GI3D0I|6#xHUnaG(Crq&+8ECiT}-(=bgn;FA3WBNJ%&#;sx@itRg4vWH^nphSZ%w zbSkbE(>CPdmC`BxR8uC3HB6~xo02Gtkmr}G=+OGecSfMCa31_b znm?oa7yH>G4!`z5PA=dn9!b^+Lv5bI#MOZ0nKVU+d 
zlbQ(Zza*&5iJ+094q8%;EDbMLBg=!4q?{jyx!XpPMeqT<)enS`Z3$XEi88l76=?oy z=ei?KAyc^(1Mf^}m&$&emAcFlj?bl;7pK*pk=}vS%RO*ds!qn0N7$O?#duWz4CRJz z5#y;wnNcF%FRYhwLS34cxQN_1ZCb6Gj1|tAka_qPvWwD@{PY`V@+6Wf8Z~Kr$RD_j z41|+W8Qyv13T9clQ~1$TWS!kcqs&B!#Pv_`;$+C>hbl!kpU34str8H-$wE;VxkEf#Lop6}64ExNFvCqT2_&sNDcu04wVW<6@AY^?) zjd>MV{8*jDC;G6AtQ&0H@Tv6nWfvqC_aV$%iX|D0rFVy9AaS6MnZzVPF*6DNzP{wK zvmWce3?}IXigb8wJDb;NgGu!wAa1n>1wV?#`Hm-;X%bFds%2=&8cjhiV`yVl0n#f? zX~Bs~ynE~q*@X$ndyxzK2}8*L%?rF*%|YW^JCXuUVE0{7KkUI9Ztpu+QqoZ5%Zr-X z_skmHU*F3vDTq2pGsaNKq>DIzOqVXrxQ@EJA$Zm?nA}8n&0B#CdvIk3OMRA$%Qo_4 z+AAe|Cgn<6#kZM>Y6B`$H(<`T5p?(dArUt;5nA0l`2dB9IC0mAB9v8WiJl_wGe?Qs z%6<5Vx0mr{(P0eknn5$geW-4EEw}LRPdu&c#gj4#wzj?vvpYT@Y_Ti9Yy28IEYX44 zTc2U}djU7++ClEgu~_^+j?Ob6$F~jR+Iuf5Es@ezw6EiAs+5_85JEyxRzgOlltg8w z60)*0qUV0h2&s%xX0lb1O}y9t{op&E`@Zh$JjU-trg^4wFM&-m2s`co6LwkV7MK$Iyq+_2Pch%Tbj48F9as;oSOCB$+so zL(@lOCmPb2Pm<*48%-(6k+{Cyn%XuWM8IV|$jfcR#}gqWWh;fNH!PUkq;!0>{s`ZB zdX#?b5@d(Bv$E<(OfDu#bghZsm$csC`F%YaKF6P(56DJ+)C-XQOWZqZM8kR3Lczik z2R)4`YHSq!=--cQM`dE&j2!$-t-+S7yD)UQHO=ZbkHQ_zsLi31-=9-Z8L|@@VNUGp zjh@wpn}(xAKZFkTtA|BdKe{zc zg0z@l{x1zZKr!?5V=^9F9#3 zg6xfr5FD!zs>%N!a-N`K`$H6rYrq$S-FW6Y6b~k-i{F~NQ`wlqLjQ&mwt(lpjT@{m z_qHC%53r`|I!ADNxgmWBz5)4983=!EN!3%0KT^dBqoVP`Mx7S@QKR`Pf?zF{Cb8-aarA^T1ecwL z(w<0KcGHE{->(*2>pL*er3c}=SFyukzj5r!8#MOn5xiWMl7q&5r0o5S;vFAE*(qy< zn%Wt7Jn9Ud@_VYxWksxNWmFXqO;4VwlDhU)Jo=jA8F%*G*}*}XIR;(A=kf7C~!K&GgE$4T`Wmn2A1@^FrB(K zjiBU}^Tdj#=aG}xf}Mxfk>%E%xc0-FmgYRd{4?tGYBcY*eVj$|U+sXxu+$s!tlyr4UY=!{EXeY)H>R)RsQ9=*d?oHeJRFjmH=qe+5cD zi?DM0A0b>KkYxTC(%*N7nKs=(&hU-wK#&$?>|i8w=sNZcQKC^VD-gIa1WPXK5-YN# z7hlIvfqw`4w5kkyd6)l&n|{^2^07ktg*du&<{>gBZpDVZ_VjP>G4xd#3w68cV%71h z5v@0XCVp0=Q&z1);4RK|2=x?8eY}H+Rfo~5ZX~(A38kqi>f-k^-yqej1BGhan4DHE z0)nJSGB`wR|7$+IoheD|-xKt4oaXat!m0{G3H|Ue4 zO$F!n;gNj{oBwPUEtH(-p`RTGSC^l#pE{6QTaI|O9v+W%`|5D-_$9ntYfMX1C8vr7OGxleeii-LuNY$`fMf_TiaQ{WVaX`dpauc^dW6)THO~pHaWW5t~=Xq5bk$ 
zc4p;Ja&z4RGvfVE*-JP%koQnK{=g$knPhh4qA}w(yDJPJV{Io=_c6h$>*Xv=-oM_mPHFE;7o4Styygd^G{+CYSs|$EzZGY1kEZQ?>)~@mjo!;E zl1um(R#~zTr=JT|O3Cgts=^ay0i8HqMV@t6p>xU za;`UGY+(!@^RDj+h z3V-~>D9{;7x&4fxpWwwNoz26=@1HSvq9h$KZbR3Puk6jKl}x!VQuNUJ4!fuE0FUD> z=~#!Yr{$k?ELV67HJyj(JI{m;x_v`?-D@_hP?t8%BFZ-~qhVh+!m8{T{5REM44+Sj zggQ}=W+ZJG(w72Pbm7^7c+k8VkbQiYh5HSrdyOTSJTj5)M^)p9>=*6@r-_eOa}2+fFDB<$l=oM%-Ik2+I+wu&V7x zLamE`;Apxs4W4TtM6h90d1yYS zh0KM0CTGe_KZFgcLG;b04K)D@G@?$4w#7SOnRW>50w)N|+dQe>n$PhXlH@qkk3956 zc$jKUZeCLVdEMyT)tf>$tVi0>Iarr@0y7UC$6>c2^fUShT2_C;ea@_DO)5bT{bpbE z9|~I6ohV^ugAkM5ADwfrB68sZB-R;H@D~TN=l$>g9hTH#nuF%(&AfYULyBDv6p|TE zM!{~Zr*bA%u28044IwmVniomUt7ZUL*%s0K-EZNit46XlZP@AK zf;;;LuqRC}^hd3dSw4FYw@gD?ua|!zH{TU1ryAiu;}Fi(=ur1P33^{Ul8R26)APMU zXwG95a^`1b`SP<+I`$sY{{pb5#E{y*PavzInw0tPGXhs?Atl-!zm7cjy3yf7(yxnA zz<7q}d?Qk$9wAavg*0c$ko?iD2={YgW>z*d)ySS21YPuV982A=HOci`DdZ!sdiCxJ z$Ju`aY5RU@3K?IHgwVrm(oJ3Q*1VTq@AI^TUlZH^pXaE^-3GH!zmR1zk*%-Z&hjpg zAa|cmlmz{P@7sawn_Dm)H&BDe_(>S3G#aZs;_-S{2yMG^4=VirHfPx%Y`pf2Md@!v znud%pZKfOP=bPidZg*Ev1l8!ZV5!KNGUm8Y$(4l|U}R2<{7=A^Yg-m2*=STf0!1%7 z$`5bE)6B=Hw0VUgZdW0=`Xf<)hTv1iwKo3Ej&!-gUe&e0*&!HHc4^V^W&R{J^b&s# z^=R*cz38%E#_za>)UUyqv&JUTT%$bp`12-o27bcr6O)9Khwpe^UlmKH`t>*+84tg) zUrO&RbKGgbY zfLKST1N~=pus$BM2bs=#3Nas;0< zqdN(sF?^aPO$?kuebm&+e}#a5zH3BHVQtWMoFbl5l#i%PE%KVG3irK=BG<>WkdzgR z)aGPdD(Hn`IXP_HFD;h#y#(*L!OX{~4oI=2(g}ZH6Jti&w^q@eE?;`Mz(XA3{Tio- z$Wg7tWmZtV4+RhV)5EWw*kvM1t@r=nq}MQNDse~FKS!Eu)quo~0BmzPh+oa_WK?#E z^-q|)MOkz;2GAxLl)7&FQ@QwGtVI|RimASN|ZcxAJ6J#u#GWt^xjXG z)ZXpHhjfCwj4HjX_yg7NzeH2d$f0s|7`MVhW>OjrCKvxTDJbozRV_~5@2D0}G1E-vPr zykk5kai|yzM%mD!ovIY6@|?w#&Be9(xq^aHf67yf#rVZPp>fcgLhkgS>v3P|x#dRc z+fp!M@Muc5dj_d-J0UDN4evv}aP6=!*-HL`Bk%F%@*L-sHD?gMN{(Kw+bC|hDo2-+ ze8s098-lv(G1jJs#ZK!-d$?y%#(y{aTpg-vI|QfFv-q3QkH)-npxSinD)~RRJVghu z;LvgzN^i93~X_wF?usD3W`aM6Yq@+^wETPjagNL*68xd0n^uX0STGSxQ! 
z5mJ7)BBsGi>}q%u@mqQ!rkv>hicm62_$NGS?1m)I8X8|#5CRlcX~|w0GM_m{yrnFj zwr^>{HtlbC7`2*>Rgo2KIBtcqXJ@g(@+*Qn88mN1)0I&xY1vb0TKZfBmmRkSwK1`1 zv&t9u?w`d@U2`JEy9ZdR%4k}!qX6FH*3l*}6RfsTWHiZ_Zhff|{KM{}s-KN#jcGj^ z9(hyjhl_Z7(~=r3^CcN`?`1{N)VcdG_;Dg_U3?S~ zD)n%hp+r*Wf8d<+0nAz}0sl1aODAhm^R;w}G}6G3mojAgs0)S0N<#A1Lbhn`C=xYG zlJ~j{>{@Qmj=xu@f|=>UHkI8hJ?ALeE(Qozo!R(S+?!&3k#uB*2VpYzbo+5{ ztIuurIcFUGZVnPZ7Ksq%+v#;rJ)6B6K9N>69DuQd3q@4&e4j*LXxIBfT1H#=yWN0J zO^!g%^LFS-jieo$ROtCUGn(u7Su`Yl0(`Q^(zkP*DU-MZWgm+0z19xTBYo+`)>oL| z^bi3%{1NDO5b4s|WKmPbbD2FDU*j)6q}2;cB;I4De_v=y7|2;eojhj7;R#bdrn%pxD5Blb{wU-lZle+-9 zkktsY=@5Hv$VHWn60LZzLj8j-3v%~bkS;%7oGM#^M?Bm6ka<$XM3BzUCgHZ?J49bm zCzFJOUJJbWx1Rf~$G2#SC!SqE^RHjV7q@qidUug^i@QWCr-X2ArxtabCua584Wr*C zQjOIb3Rtc|wY!ev&#-rbR{dbkUP~8m5pJ`^7wqWtN?Ba%oJr=isU(=dgy%|qMD9_(J6 zMJE&sP;ESfM3=rpIzpfP^Lo(jzMS_9S3qG6X9MId!Y|3wtjWO&lUXR~&h3j!{c6~> zhk1zT-HjD$w&c=QhVu$@S>!1Nc1mR`J?^>2{sr69;@Rz3@-&ILWfkL7aXW-u&J4{m zC!-Jg^g?PTHF6U+sdObR9yy34uO(pE$rFeiWI?YsMBBz(> zg*CKaLy@}FJ2Bbzpx1{CGyFO;nOsh5)8*PIXgY;5>nDEVN59^A#U!S%bHY}{XF9ST ztz+1jIFLTqg?e2mKF6w`@GklwHIn4t>4ATKvCTimQt+WXaoNS2P|jV#z0ZTo0aP?2QwI-Akiqr&^# z>x2b)-t5rRB~&@58Ci1aLVt21AB!w}3oU@}urFfSDN8YVmN7|&>e0ZnF5=>49T-$> zD^7E~2fs*7j5;uq7QG%#OQidX&5fGj)k~U2DJ~Ftw`tOb2U=t^Yn*tbIzQ*0y+hYL zN$PBW%&csK>CCn;wAb&&L+f5N?$=`6_KczrGg9d0e0ef;evF7^$A$fi(y;K*Qt`%3 zk6Du$=ymm4CiQhKJGyBDgjXY>ARmG`dxo*%P$@Q2q#@jwxeZIdeV*~TkFY0|^A&3E z!E>ECy$sky)7s)WU*?kd?4U8|SHyYDC-c#nkc*FlLixGW2?IXkoO7$ikW;g0L-05} zyUTsqPb;|g=7`sw{5eUCrX+O-IG4R(ue#6RvStHZfAbl)wg3}nc8H_~@E(wOkJsSO znrPiKkj9_Uq33tY*lgZ6mM*-GTyxH3Tx3aEIv=6%d>!lJY$Bt+R*9XpAaFXhPpXm6a(YG(>-N6`+s9_)?&hcr)V zHh8KNmHhUE-OK&#_o@(dUCqF^Ca!VzxsQND8q}OBL;k;YFr+C7hni|Ht~%~c?Kk4E zZ$>+2XO5x`S=aIBj2*cF`RCAaR})o(49IwwJGliNK~|Ru<(%1r3CV-e zlghc1>pdx|zY>#CkN0|Vc`hb2en-@)dsU4M-jsb_2`%-V2)G}O|CoV){SPBaX(FbC z-xU^&@kLUYHfbGFC(i@QVjCMRa@o>fd|cxRPERX`>GE(AaZkqZ#|5D({0j`j-eK-h 
z7nafcBJMaz)4=H$gb^~abbCM({x~a<;yJ~O53>UWUng$_?B(2tVndoEn+V!8h-^k_9pr2916$_qQe0(;%ARERek_{yS5poh5J#ys~JU0ilD1Fh}1kskm*D%n!e~gUUbC5r#T%V z*MG3+3%)eGC=2Y?I|gaxL?tSckuei&m!b36)-VmwS+Bp9whF`xSeToPwUXNl1zyda&UooRd`P zaZ5WYt{6hI$dC7@UWoSHb0u5PVHhmMxh1DYlBVfZGzC~w%Oq*>&S?Qi3pb#aZ`n`@ znt*VllQ0cGiKQ>>$f$WUWM|&Rh1VbW`F{ZpQwCxPZU{NY7(L%pC>RZPLY;L5%vSs2 zL*L#M9biLSZT6${hy^_?+K8C>~}D|HEXfn8iB zd#H5|3pIl%M9vt&Uq&;prVvb0;-#JlUg}6)W(qX+$u4Xbx3OM{a^kj) zyS%1!^bwq|NK@ymqpV`>9;`F_jc>N^Sf8{LY+KDFvRK-VtxX-cH*+UzS>{2{I+Ree zF=`BSD(y(Qo|r$NmG2@ahhu#Yc%ETBR61wuF6g{E5ycGNr) zq6gShO{OWmnzf&8QoDq|!#SHKR*edF`_L2V^RVK2$pXMrp6Q;{=Qf^AGiw3ujtdB zEL9pFLt>Y0I@Gy0K&;*8D_XNo!qz{OHXa*JPrOw5u7Kxw)%+T{Hy^UErRTBys50sI zXo^R^iK86X?-S6kDW3Et3%T8f)Ns-S!@Nz{>2dxDeQ$%*)_e>Z+|Bm9mO$lSY4J8E{_P1e z5S=Zk#<+uKG+jxCT)20^wq?_HnF&OH!^Fa-O9&e+LFyItENkHog#ERl^KW{fA1Ol) zpXI1BZZwq?x*|c=nXaiGN5DBnK)jLOF@uRB;DXGEfR_mKKg zns+6})2e?=^yYvW!abYdv0sDY+@z5pibAzV8ytoT2tI5;KW<9W?2+lr=b{$p`A(rT z^`^XU%{h)^i=kdAL*=v$HuBBF(Xv@&$}@_RUhjCeHv$U_%dp!!U!*_6hv#kAL8Cm0 zMsmJ^UgT@kA5`XD86C1suZH#7@2up3DYYE4AU8`l&YV}pLSHS~r2ifXt@0wt{Dp{{ z;Ykz9rD@OJDD2Hy&IUSLiLaXd_BwUY+Ux4kN=SdxV&b>wv2UIxC7d&5*0bL*o3W$F z;AIQ4lYhf4{;OB{AEFui=CgjUWsw*7fPLAYj9}jpL_S6QyyQ$@TUBzscZI2N{+sBX zr04KpTi(YAg4kPdRI8J$j+HYTH3JJVFAp&WH*?*3^~OfuNs~QQa^f~?Q=Vc-FJr|WU(1J z74triML1Ubio~mYmtd)qHdV}$C;d+!glmQ>^v20X{OHmtd{fSZ)UiM^2??R*m6wDl z`(8xyN+i+Sh#d*6!??CLIPFd1?xMw1)w>-IoNJ+~F~Lg%Qw5V+eH8xNjPb`b=*({m z6z&^C^QZ9cNr(#Bj(dp489i0qgE}#{cbeFuG#;0A?dX=PA0AzeppDOF;kn{!>hYTh zsRscpIVp@PPZ)@=+-c)q4bB_A zDr&owgEJMiC{EVq&)q3paQ?~0FVcbkWkb=~CSzDAbU^C6G09ETKxtSa6k1;+V#Ea` zcn_kV<#KBLWBN(8&5&}9DC5X27PuPLPeS?RexQJlmc7Ylaol3LQUv&VLM)q z+kjo45212`oUp%b6bY-=L(hCQJ=~pv*_)rEeTFnS2XH>O`Ez_IbHIG2Pdj;LamC(5 z8mf4K=YfnUdB9Vg_$1FFDnem*p6>?9`3Fy%4d_~4%I?lKCS}uBQG?KjeR;)my^^M) zcPnmVpsfxC96irc)TWTnRv&79{RY|RWT{QkO_aPXkfby%nBkQ|wzBv)Yc~$Ytmu)H zwc|9>Zds8{FIiHEQ-8r-E)jXPP`QO&D`s znl+qn#Rtg{e7;~#V-w7&|A*`Nz?uK&)K6kwsVRcoD zNF#(kcR7*d#SQFx?ok9JkHa1ddrF?M154`-P}NZ(Hrl!lj(IZF5F}6i?qms5IX9{L 
z$w={AshvpMybZ;*zO+bQ$6;d^Eh@k7HX0FROZ3p(RdA`wG|YN}=}r74&*wOXtjf z;L}V4s#}vzYMZA}R=`PdWlALezT)h!b4gSz@er;nhVk78Kd{lD3z{#~>5<=Tstp=} z$93jpwJHl2uX$koi?Mj{*^iRr)eyYmjF&|CI!H;?;zB0RENY&^GM?*A*m#FcE_0(t zk;)joQIf9uaF6+JIqT__ie7_1;aS^$jC#!3VEVo2)8nz!;9y3_C(Y-KI&(U9E(X!+ z*{D3-&Dj(i;LLM3xHp3a?9e5z(SNXefInn*=Hfu6yRa_pviK%;=h=A)UEyg3`R{6g^*-HcfbeSw&MkdvDqb6{kSbZdRo$ zbLL^9tcTaoykPMahgz?Vb0me|Kg+N#b}f4vcmaQZ_a&EeGM;&>^Vq(TkyOmH*6uSn zxAVziR_;8EHqV>K*jand?(d5ukNBRH&5`ulBqt7PPxa1%F zj$X#9)0Yv@(F}vf+#AX`FX**+(@(#C^qF(FM+8*!jGH7(3XJHEpF54vxr?WI*0gQ) z8pQJ%PCwg(dU2*wjsH0MEZ@Wo-t5N1u)i?e^T5OVr@k;=GoC7K3XtUJiC!=u-^6pA ze>NIHCiUWKp#W}E`cm*u1sdOYL+IFLK)Na>;-=u+s5RV;*=a$fkUEA6k7w+YQOHpND* zP&=T^w4f?aEE~HFWeeQs$>9Z@rKyDVlP2P8*aD=MSaW}} zoJC0QVO#rO6?AJ`pmF7@$iV*@vfTPoRInVKvoj~tznN4rE|Iguwu-Cjm*b3jJI>ei zg;vBF)Gu_U)?#%Ub4#9HOw^#q-$(P#h&SeKvZuRE{H|G?0C_du1qpGdiE`)Jtl{Cp zC)K0eKd-@>%HA}Np94nj7ucm!(KNC~&Z{GCEI+S);v4%58|joXEs8rp#sY; z_Mx(WoKt0Bk4=9$XIOhYm25C0wL2@39J?RZB?@%?RW@QnQiOp@DWqkoPj7?j5tFtA zX}x3N5+W;JSv!_&rtQVNb&IJX;XI1oH=)r+nVuD?QP{on_*-#-d3hSplzIzFEM5nn zRrlG6%Nk_#=K+#^Ckf?o30U0aO}4#xR&Zki9(hh@Gd^q(Z!5p%+4^{2)wDiav98`m zD2O_W8&S3-SG$k3JHKJmU8CvZ%kPljea%iig^SyId}#2pNkU{uFEoFsWM`h3Bc{}! 
zYTg_}^dx=yZYfQ7#m`yB_IRwAl_U(AIf#^=??l;sS#syhy|%o^_%z&xo{w{(;k=JH zeu52M6D}adX9eb+dBA6MhBcO!l+QB-={)~D=HYD|@#uoI_CxsiY!wzSwWN_sHNvoh zW6XGYE#6*NsehxqKhIe7P6j~>NIQOPqYvE_1U zD)q1uf9D0Zf4 zJo?Umt4AuL{+F(a;v<8A#C?z+qC&H*RAG=FNs41;lg$%pn)K-=&(xZFy_?wq%eYK& z?UDYFt&?~=QWW+U0gK4dT`Pn9XHrOqyyJ8*4BZ|c3|Cwhi>A#J!3<$sw%JFM(@_h18@ zhq#PNIUJHLa54ylR)4`&?q?Pm*8S64}b>B#6Kd%r@{hdq=ACIE9D2Z;g3vj9b zhRRnG6mU#|V!Cg^#$_F|x6q|u1qQT#>KZcp62P{TE-ia^6^lJ|gy~y>Kbu`C_SR>Z zDQv-&zz_kt${1PEOQli8xmO%N|0B;=ngzz)s;YvJFp1AQWB`_ze)mD7ucwCK{^ zH5S6=^kBOBV6reRJfG<{e`Ze074Ub`NYZ(G6+L}9+oeQ;$iWC>7feKJO_Sh$(v042 zIt|ykfAEs?&-)zU-0CBaG-Uc);eOH@lomMAkD7CE>`B4FSz=spn+GjzdkPP1!(itY ztcY*G@16!M9a4{vGmZ+m9|q7m*M5TIArlsMo^#=LTI2HmzSO73f|4!nKzeaM5{8_D z&G|A6aIvM*d}}h)oKE@b=7^tr7NK`|M!PzI+%LP+s#oENu&zR5CQ!Z5j#8fOhup3V zEIRZ@tiL-A!=9>=;Ur~BvMm;*rF!5x(?`6RYd@oYCi1@UDB5#~XzkdoLU~pfY%H|t z-mc*+=%^O88@__{&_UvNZb#)EUI zF?cr3eXmH>kINx3b}@4ce1!x`6vv*Eg)Q&v+g6@pcF7Ycs5T2pwLJ4b$qII1uSIzW zD@4*t65IndcTF5Bk!Gb# zLW~OCO_@urVrv@oERvS5F(ajMz&N!$T=M%1xu?k(xWj@ptX9!luIG(2c#3Rq4|JVe zfW`<(anc`ua*jWZ*>e_l~{2` zd5a-@lP*mkSWGxMHABf zSq^7;52VUJL_aAz&Ydx)?SFsceJW?eNS?;R1E0{ueT5ChZq)tT}=Z|EEad=lS}|UFcU>>(%%pn%dmT*cQ%& zF+SXhp!0m+<&@`)l2_qKP&edWow2Ve zz3h73EPVLRv-sXzGpo~~RMk2Rem55>H5KqxQxy-}!26#9-*3||jwT;Hgl5^-2%V-$ zS!H~`MZY6>zVQaT#&y=t-v%UkVj5_VJ|eUX$$xGeCY7!eT*89U_RpKMcmE<|NEAH! 
zYr(E#rFh-7KEgW36t+O~A{N~_ESeNuih%>{=xy*$Hg%&9iV0L<{2VWD$k6fe7M|Dh zK?Mh7#fc{Vcy-l-E!W=9%6PA`XiO^C3k^xrxSjWbZ?hhUp_t>}EDAEUraANWB2Z46 z)-Ls?vp$0TPP z$_V6t8V`#S)=Q&V;yxCu=u`OB^TJ`?S(qqqh3COHp|-GxNl5gkP35aGpl>>6wS5$S zR7t>#0lleYo*ebO)E2AoZsG!EA8}dT89bNr!Q zP?M&EJnL~Yd%pP4@n!UO@jrZBc^F5F%Gsq8jTe7sj>G(_+kBU34`#*6LT_;-)h&vr z+Bylk)_ey;vib^1xhJ4@Z-KaY#B`6O>>)I7^%eGL!U)o&KPd6sYRFa>h>B}M0RSJO6)BJBO;K-YOL;r6W~?DoJ+~0^1gJj|p$Cv+AH{>}E3WWKH1vBh4PO0U5cN-}x4OI`3k;t|f&o z=K0yNeIQYjwOG)SHbAT`qle#r;mGZ5+>qn!y8>6SW3-tp^B4_@MW(iCGKk#nI;jPKJ-XvoYN0K_nc(U;v?D6&>+$j@5Z;G z7GyW~4JxCxu%xgLeEl{ElUn*wcGwX}T6d!&D1@e~oxqi8!q7`o=Wy$`M$e?#vBW}*Yi(sGW21N68G@> zz))QJF_40C?TOZK?Y&!@{%T)Dk&sB$Z<5^+>at^9tw|+Hbar$`?Za3copnbrkJ7aP`hR?j@Dj6jS3Ppq4@|PUFGHS zM$*g7dkun*@LdAycwe}eG^{k|(WcI2bZm(N{XKsktvdpQ*Z|JXyAUSs8aF0Ozm_cL~IAAepU-+wV#k(7KVkuui^$MnpndTj$*)X2Nz{ND6w-!H67 zT!MFVY^cg)Hg#3^AhO~IHE7<%$p92Z&}F8trHvD zq+s^F0c0d7Q1sJQ;rt6Z>gyXUc09m&qzeb(&djmYu|JIB-q?y)Dc(Zjc}bF1*AY(r z_=WsPo_|hSCGN4CM;SfxG*jm)tk=}CeDe@0T@{Nb%_neTwGvIpv%rh#5oD{9Ky$aHO7ScJ_h z5z~Bz+aDyc)rtzu|H1X) zJa%QQ5>3e(Nzu=B=v??_o^LG0e#Mt~$5|_H-@X;TWQ?X~o2BXcvCk-J3d7jx8&EgL zfcfw_wU zMw0AwojH?g9x_`jJ+q>`XslYqe||D{934epj|jLH5jrNEd9ZE zd#dcktmHg&-?N}$oO^L;D&LvMSzq-$FZ^J7D6VPD5sIa}NpbIWq4UH)w&&snsMz$y z{a9o2n8)b){XLkrW)K+d(u~&6AuT zonq7Ke&WQQ@hDr;o7~Rshk@2Y=p}24GjDkzx!#zZIH$+_g`W7nj0%nTt}O22JhvS? zcHmS|D23i)v~JpcK_f+i9((?TNFj$c<08H+?Ltw=DIsR%H2UH30@p4n)5~k#B5~&~ zVV9pX5IYNp$D5PdbSnf5A53+JCz0Q4Im-Of3abV4MAF*r(3%q}mT!*0fmefQ{}~6= z@cVk_AV0JWnn%rnyWl%AiKT9xL8|v;#A~<~9_waL-r`GM{x!2O?aVVAiZ!HttS|1sHmfBcQM@^`HFA$~tS8bbA^{VDcO9v-d8K&eeV-Y(9? 
zB{$ytYaP!uO$}O+@U((!$2GJZ@E*e9zXTIQ3Di(yOX?rJd_9{j*veJN6MGoXeF z&NkO>V?JL7(5X;!(m%42UPx?W@s51gRrO;is2voUWtCv_P#fx!`H1RitFR?co%gB+ z(9^UBtfYQ8>nN9{Z2!CL3xAg^E!4?A=Md|j5J~^-FJ0cygCVOG>EOEYp2IV}=u<)z zTqAz7n%nC@x|4W^g?IDS9^>4q-!T9C0h_En@GKUb?Zb=XcJ36v-U0t{cYCIdpuU;h zdkh&&Kacbl-}^ihx89hLobDD#|FnjO^kH1tdmOU*{Lb>L07p1eVwZml6s8@8X)j$g z%xV(KlRaq5h$P{}I$ivJcp8!0$3dmYfEI^3QqR_7cp7d)XJ>E5fs$Z6*X1mzcWxwS zJdOg+UV zFYn9mNqZ>xo|L7-Ee_&&ag7)segIvAys041h0ODY2|G{xM1$iGJ{#t-rP+_MP=X9$$k8{*a+NE7GIeo=$7?thuz2r{{)NEd{wc%4$^ zm687%(?%PMTW7}L^Ex-0lEU*-Cnj^fY9+F!#n86lS+Gr4VzYe;*rV^=LUf%hy?&-F z6t?WcyXFTXOw8!L!+KKmb0Pmd4&o8cw_(KRR=XDwWcc(lGP!m&Nl%jIr>jzB z>MOV(pFwNa4&t+DKT7NnF(3byBDL{18K0`Zb&_w zN%pZ%F**MwqExlXagaRS+O!XEKMrNu%bh4w%br4S1RznJ=ThxB1L>Ox)6ZQK7HbtC z({Kpw`y)+Z{zbSv*jpiJ?!Sv4G6d?O-aGG*owVHY`4`& zT7Iex-6mc5CcU0b+vY`KN7T_&^^Uc*1Y!2yOf2P$lmGb7|KsSq1F?MHH*D`cBBP9k z5g|>y_j!p*X$fsfI}J@~qDT>;P&A}c3P~yJeV%BjR7gWZLsNa)QHg%{_xE3am0mp0 zeP7pk9!KaaE$--(r*XNRY`Nzy9LP`-td|aSH@%9PvjjM_+`x~Yya)egIXt`%2y?D`P)0<{Fn4!XCBG`C4`qV=c&rdOOYFj|xVs-yN^H`vHPU3U(JFd;zB-*}< z<6MbeB%5!Ivy;C|tg`s5?d3qQU9r%O+sBUg>V-e!HN+2P&ryExhS;*qEfOhj<^=lwo6qW&PD0o7Lil-f2|r!9hc~?pRx?*od*n&z)Lntb z5?R{!mG`oZ^Z0y3mzAGaAlVpoQkYSPO@nwVBh;X8$Wgzdw0N_d$JB=^>R1L9IQt7FZIXHZYv?Te1P!fpEd=3 ziQs%Ug0=>P^iX~Uo$t89;85S&pLv8>?|JR5Y#V6rlU4!0hEpcM%VA9|J4r`v@ z!?5+2B<+qRIKIV%;$8E&uT_bDDtn4=`#T~?wh52A_rYx|LwfPgo(vB1{+}^tnf*J1 z*w#GsZFQs>MX{gbiau(mY8q|FFbs&sehf%kzQ%c9I95b;!?N z$EN!5y!-5amF?SPNwM~d#H&{s=D+r&l-g66cG#J|HK$O@@-4J&+agiI-M*r}E463m z;^Il}933-)4qPtB>jB-V`^GvHJddW&w*&B|;tMKqm)uxhJ~Iihb{!FJ1epJMQieMKyX5hG>&c%SiTBau6Z?E$DgQ5;SOd zlYYM*q!f_IUVYV~>rX=Hc9b`LZ5@v(WqYCe-I)g3$Kv9}$#iynJk_^xw!A&(@%$Hs zqEn}kxKk7g!e)_u##N+guA{2;=~$fq1irTv$$Z{-O!O_ns*jZ{=(ajp4_Bvm8v96a z@kEmOOPpGzK`s#|Br#5TY)sW^nii-+uj8`Of7owke!!e=9J($9d|rxvGqX|Jv)SX% zS`jTrthfVHmyO8>^&7x{rvG(er`I65Z+KIhU*$zDd(6e;*@xgb&jcPn2eG4(Lnt%u zG`e?s(^cqG^oUVdSF#wn+BVXZ7jl$SF^#*&J7CRsI9DX?_&ZL6z9yy!TVKt?8p#~W zzS08IwMTK*s0h}pe4*s*O=UHo&}%{q_v2)-Q*FoL#XZsCM!Q8k=bnN&oDFMfg^BH- 
zK{bBZmZVEhQ+U_+K@IXa<2!x81#DfChO5_{>CIhL8u5ZVR&vh?W%Z+BnrcnE=eg3K znqb;BHApfb<^t9{TEQy3ZE1P#Y1sB!!}|fh#J*AdeQc&q_3p-G3x#|ElnmDWi~;2smP6Qjmn-wVVp@aXhAp$=^oTS(FK9$vJtk! zfqsv0z$81+DWf?0aloJUb>ke>26HUgcm$Q9-NXfFe8^6wjyuhHo+IlT3L0<1@|gl% zOi-sm&k}I&jjFUoP@_i&3@J4I8rEcn(mx~al&$5wl^G+2>0*n-H)k!K{b9(@eT%R$ z@(kPD`A+bN$dURDbj2b><80biM$?*+czGa=QjcbOH!P*>;3T?`DMusaxbrM2RS<4F z(It<0;@jFHo@r@d7r$3ZToT4pc0(3?xL;{psWSOKc*kCKq+(OxA!+|L2IL<+9vR;+ZR9N)KCXL|uXd$dZH z{H{Z)x;|aLa!|be!+>;~4awHElPxK^2J25ou-nT!VWI=I&-(<`G98MWoDWuh6^a?w z)P6vP91JbV^{lPnCBF-;876d9Q(nxs?Slln06OsODH8RDFq7#fIu9Y1_RlDu5Z}Ap%aNkxdiR&-p1T<_ zPxGhuVGPNZJ8-Gcm4ZI_V}9TW$=9wZdT$N-@aQFG%nw6%z5`WT;v!CO zo=M#X@lIdAwbVK07#?T}sMxJU``+r4P4qs<2xFy3YLv-pQ%@>Yyo_(I0i12kcTQSz zYvZZD)TO+rcmfqE&r)k4*cbAE@fBP-r`mG&8>|YN|Pnm@?8lloH-|k>advE%e z`<*`Iq=9m}whcNx#~ zZqSaN`q*9`i=P>P1jk`2w6J#&?AB@0*vR2@W&Az-nEeI~{sZYRXN*iW8b|Mcy+nro zd6-=Oh@R(`;%VIglKY?k-QpcyUGK&+6D}gTMu$!pm5LT_hO}tZQc(yUh36w~qht98 z=J3*lhVJM`|NI;Am*;!_TXqSD8#!m_Ob?pW*p9xk^My%avBJY4{_x*yK&mo*XMIT;M`?=ex0Br%&)JNvvRrygTH;Xv?6p2#;_j z*Vqi2IcO`@I?fUEp7ufYEf?|}mWrVd&q6mCbThw-vz2veT3ZE*wnmf7*fdOYSEY4( z;_yD+4Ldg}L4D&0+U78qGaMZ8b?rb*c=i^E6S#{Pq8=vg?3 z>N{gl&x=%t8=}Z22|RPufn5RdNDL~))NwzBi+Xd(%drNzDjTVG)nU%Ec!KF(nsj7= z5;ZAg!Km0y@`Zo)ipzL+uxA=E!(jYPZbfmX2C0{uv%4+fY(~vWI&-rNDWh#?jcKY$Ei#atA9)j3_2kU}Hb~&}+95bTd$c7FXHP^TsE_r5&K_ zrAA_MST+u|TfrmIhCSRpoF-r3y{J!9NHV({{c>}F@uDPr+ngrkq{+~d;{Nzzu1SwN z#!^_f_lUUj6mKJ*3uTMe;iKzR(yji5*r*F=kFCSHe^J;THH=mnE0T^e@1}e|&t!71 zV%XGf6z+6bbiHRzA-0ReeEm^au<#>nZ@*-@ng*opW=)BA1!xcNMHhL;?5Il-26G?B zu9HtuS1+L{>JGxL3NxG<*PS-ixYMkPNo1YnE?ulqfir78n0&Am%`K?leES%vZ~HAa zu98EjqAKNW=le{rV6o)KZ`7%55bwXZfvs2K@z-=Tx!xW|nTsC@9!Fn*xpt#;`>n$4 zE(1C!HKCc6s^a-QbIEt>Lu^g>$>(JHn9_fe&~Ru7!iRmp?{jU~72U-KOCu<6;#Mm6 zRHmv0rO>|+D|S`xzyy`aV)3{CSi!9^)U@_0YaBG44o2+9Ec@LQ*SZli_EigB1&ZQu znNpTJwh3C7jU;UGVT@@TNkDAtpyx=6tM%S=ARd#H(>3W)WVKrNs#`*UhPY`#}fh`|@4F6mP(DKxD+-y^$ImiBOQFpsp4dK4^B z#HUfF^trGXKkt2lCVv;y1Q%hF+#?iTG@@YVubeYtN7EL5mwI?@K?6+4JH11Y-+YtV 
zJm*=C(3falF3Ym&IWOO0CvJXA#a8n+@#rO6G%qoywbgRu+7KcJ@os@rz$`JM@F_}c zrr<^5RQf$^0%_?eiR-$5!IRMjRH>sTK7Q7V?j<@>$X`S8uJ0=9EvrDKD)Q7nNfojD z9(W*p7WcAV=2<^wIv?PNm$O&X&WbQn3RR&E#Ca~O9L4B9(b#9_FZOOU!{x-0{-6$?a_v)+p&vpQ0pm z8y(DN&ciTm(L_q^JqKNUw)HY%0Ve-_fO(?~X-)GD=y$YBF13xLp67>PTD}fk%p58F zxCVLjc+2iPZH2#h56hpGpzRI!rS3MQtJYO))&1@i@n{;|nQ23#6eHp9wGBJ%9Lf4> z0^Us8AQT5h(B3sxbX4aHGQt-?<7OqSPMr`O^=47=&pp^uw1VD0;CV6Ljk?TdB85R( z)K*`JPd)Nj_6jBXG=XQ{rBN6aYL4Mg3}}k@n#Hv4>37f`eiz@tWL5Y zPj`qhF>R7NZ9Q4s(G2Xmf2(p`MmF;6`jelej6K?-i)eoTS;<}O!!GO5D+dd9!JYGp z&E|^%!6#r_-HY`;dV_6RG=={1JRsTFlAo3`jnU6%eGiR9=&>onp(lOli1%iQzA9w1 zJCH0gpTdPRB45?H(}&)1nAI_gwl&pZ+VNzpHM#`dC+b+<51iA|1*N9zxS4nnH)`u} z$+r7V-wzS4iO7C{b9$?T;`l*W0wVHbnLkwr)|Po9w)>-P(uF9r?sY7E~hI*@C^@V#8e-C!t%v3(k>$#YM^M zaly%e`h@7xBvU&vyW}Az?{O5TeXGDrQ&oH%IGzUhOd_*>N@8uuf0#H+i`?j`#3=;;h$ya`B-si^-sxw8e1CqH<=yoJD!|fB4GF`f`&{}hGa{D`*A1e?bX0B^OxPJnn=Wq(l)vO)27gBa4}v1pkt6xF1%8UnP8p7o|k@%N{fN z^G0O4k@qpq4WM6Nxg)417E?IWY_d)+e)|lia=He4>B3PomLBtY<;^iXJ6mu zP6s6+)zi3P?Jk0F2$(7+Z=3NE}0tuG}=;Lfo3G5>L&u$*{N)r{&^+KGM%HLN-L1y&5Qz!+B> zieAaxLV5)#+R%g4HWc$2UM}wH4xrb^`cpo4Cmb882)&`_pxe`c8dcmWGfYAwuKi(0 z^aO;lo$TkzL9{LHGM}&QKtHb!qW0HRh;^Fe%y(C_o*Rgx4_-%)h?!#lKgC#`G91%X z$I`C+k<|CpUm^Bi6E5j-uiMC%9zmS{tDN42su;dA*u9o|y{pBSUQZEt_AV<>(UlGh zjD?lo0~}1(rjL=<$XdLJ!k(|C#}VBq&*KV|IyVbe`}V`V!C%z%wqd8|*-}QGJl?m? zAh&M3Qx&qB6bEpImUn#R-xpV;#^yD`u;@BI+bkC3m=r7aaIfZ`y9netxbMO7bYONQ z&ABIto!g@@!08uuuoMb>mW%Bt+~`qm0~+W3#h|8VoO>8W*5?kxvuO~0J;dEPy^pc? 
zbC*F$Swf11x0sB}dS<#chr2Ol=t|09N*K5YYGFmpf~whDQ$Gq3osdwkMV32M=yq!u z>*}{0-QL$?smn3w6q-_Ev>~;xpG=E*A0uE{DD_scC&kQ7yz|c8&a-T&bl!G6&bKFX z>t%G&(}H&Rcj4-33EniF!qxe^1yz+vR2O;zu1mt`^_-iis6P)6QyuzQsYQqMGVrX# zm3^J1N%MKX-rr*)wyzH*Th0YATk`^wmKY0rrf5tc0Z`ge|F=A#QW+ z#}=IFkeL2ijS_!fkGYwpn91{$sw+I1^6U4kYgqtQ+bUAme|luG?jkG78AWsbqD0B$ zOW4!0lO6b=j^X@VcRnf^3nn{~_Vji%Ye?88F$&-N%Lw;A_n}W7Zur6Ht|i(2BzNpC zgu|w^ne!KBZVE!Sb zqqu-OZB{+vee$CsTRgiGE6y&BkE$M)XPHzRMeCq|q{p zZmTvhcm91Y*XKFJh1Z0$I*%p#tt;u(hYKTFNG;>Ma_%v@tZ zL;bsxO^37i+^h{7f4Pg!HILv>r-|2d0!e>B5Z!;`Eb94k9~;jj&L8wr`Yq=bdY9`^ zRfnGl{=MvT>lm#m$^^*t%GaG6NUFozIbqr*9+$Gi?f-(S|6a zx(bKk7a==lyXdtni`iQZpnw1TSZ~(=$)zm;SpC%$mPcKX@y3y9Pl;hO*4PSjdt8LJ zPm;9CstExvZ0PuY5#vr;(`Nl-y3o9ezNEK`KBJaEjn9I8FCK?pZ9b0n@&A8@ty+UN z?d$A<%&QILsLT6}|EwvuEdwozebDu82MnzzQhMA?Hl*5767N^V`-g8);b==6lDV&R zrkZ>E*aZ|&5YAg)-carH6_VU8JQLrt(>63}Sl)W5iPABHg zBF91Y^erzM20<|><5{qmc6)KYbdhi^jCNw*+|F~!Vw2l$Y;|#YmW6a?1XiiQwoFlFtDV&sB z3GZ}Yy8c;#j!v5d&w;zy_2w|KYu};Dad|yhr5Sg~WV;I1&ZStp*Oo$Dq-@|pJ&bG~ zLE+P6IGa+7dOcpxtZPT{=g?|#_^~|Pm^YruPO4*{cTXl*?1y)~J?VG-#fRxJ%&c!H zj5-DjDL%GT@_YkKpDI(T_e9bw<~~p-drIUy_jG=iRp6b35B}GXp`8Z1F<0Sp+6o`r z2h;1EAIR8o1BZ4MVD!Alxb@)`Y*f9(usVI}jP@2MT7G10+wO8+w<_MA8bB5roJFh6 z-4tg{X~lpvc;r?$1ns)?l%MuX0QTEdN@VPkhA7{e!=+4i6eMQy02EOC*7bV%;NprynYmQGKxn=(3 zeOyuOQ~d@P&+AiI*(<4s`A@vKsYHuz1d9bDR#DHgX8d(|%NfL#Z1&_w9>;!#wly(pDU7r$KBWb%eClz6Xz={ij0dz~a) z4ckQ1$-^EHtfd!i>+Z{1#D*bY@K(gHx%u$!4-2Er_A7 z_u|K1B^t%~mT{x^3H zKIj)-Ig}tXfU^!&)F{_N6CGXMNi$?BDzXq&%B)T;0iv{ebgT6SDtn&-%wsAf;m@4!m#A3cKZmBVP_&42jw;2_I7?#=n+s={p1oK}3?h+Ul>2)aFk^Y#_#+faMT3m7Qm z8|;9^0zc}wpND~4_G8tmb9j3*5c=IcXh*FKdA7cSty3Ao_*}a@>pT2Rti^twCe+ii zyI2;(^SP&ABf_&Ep2pbIlO|iT8;F)NwflB{V9VE^tGrYFhL!-I~PI&TH_T@y(gJ@sgb8E2IZ(iIf7WAU5 zge$0osk<7zAFD#Kv-hA`?VaR5#cpK3Mw41~79zZ6Az44^Msc@W@$pc&u;|oYq*+X$ zfE&7$JUxv6Ud>^9t%ixCwn4-j3wvyD&G+?@Z?~KmRP1 zrQBYsbiU6Mc4W;&3h$pJzO5E8-YkwyzTO>H-)2y9<4!Cuu%nPQuVA?UjfAx&p>mF= zG~uHiowV}C4|5e7tUQMPB(-4VK{M(cGMJw9n}=PMW4Q~n34LcTMf|I)SfbM%_sr~Q 
z_2n*{R=tXpIL?TVxsMj5*EqC$f|$l#I>$8BDfI|HPnSN#JZUETCud9Z+J?}loO-NI z>PrL1q~O(@denTjrVpEqDeJcnUEcDDWw^e8j*%wS?avjAl0sO=jOC;ea1R%C%A_V6 zxHn8O6{X`2L0a-j^nJP*It^wtRboL_|9Xi(oqwRY*iU?PqY6(p^jh!}afR@}mJw^$Hd1oR`v+3!iYOtOJ>0|JZoVF*N7$Y8<6Ztl@p2 zvbc4;PZ&fipKKu86PmQW{yV;m`zmpHk&8&JonpiAbaq#77)>7ow#3$kZHQcjxLg}V zAMwDXrFzUqeH!zBFhdX*6~Ux@rI7Zq0as-EP}R*woL|wCY~H2Ol3?!3KJ!pK?6?yZ z4>c&lJspk9^N>6%fV?+-LiQaE%IhUZ7anh*sOmN7IL`V1`AK-XY$(?DjO9I@=~Vts z4I2l@vH8a@%r$+`Z{JbXhbPS$rVeIKG2~vVFpn7Xhj<9=acFJGqP6*!_}r@=v`H&CH_%(Su$HV#r-ZaHQb{f z*n*Yq3!yr<4C`bK#j*@OZ)nNI4}Slh=XnuHOW$GqZ#}xXUXR)W3SpdF%D#KpP=!_x zI{0Z4s(BzL}|CGe>FQ6#5eS3}t%UsgNNj-C#S4_FPI3=H4=d*ZxlS^i4b4 zyniHJiciGJjNYX5P?_#Z9G`+#&@F-Hh%y)^Y6{zpXF#?e+CZuFCojGVMSMwpmEfmbcV=_ z_I<9g(DfB4y=jIhIeXgG;Y8CmK8ERQZe#a93Ez`xxU|cGez`hR^3Y-IX5=fjq_rNc z+_QM&u(mKda3nqa<&M4o_$;zi4n?bb@^=DvcDBV~)vecJ@{t`_cBKdXyWfTEZuVkl zgbdkfLd-Z>h%3fDpqcDLgJlA#n{Y&UoBtZ;?ip~;=y0h_m=-PKTn@WMQ$@p(;S^l{ z5Xau!MD+X=NuS@Rgt^a#V%NYkII=~70-e<0eJ+&7{9Q>s8cZLgbv$WOsdnz%^}b!vjN1ne&8Axj&i?c%DY$TF$h(q(GWk1{kn*2yUGA6~1^_ z(x*?ks9W$Ek;BH4wCM`8&p6UjZ96gO1b2+i@gOc}@g?0}K9u|}00%yb7~3j`h@Fm9*1Z7! 
zCnFKs+9GZ?N<-@g*|q88wd9%ost5W+lQd4bDc- zHlSfYhB2jH`c(9YJEk5Pi}uxPsA={)STykrOo*INadE4VVda5&X16i=f(+@bk;9tY z_0&9jC3ShJ(arm(&~zHEqtE;OIR5904YU@!pr_mxT`#v{2x7oW~dSE^Ut6T*+rx=$x|%kzjNE0UvpP- z62(s_K*E)AG-psVQqBKhcgi=UFNma7lhe@OiqXfwS=g|rI|iQeKq2Q}tv6MJrtC3E zSY;-5dnuAd4`<3>v;lqJUt>`}hOl7|#*x1-I4zVV3F zIgfp^j->M^(S66ux14uAhyDb-#++?()Vtxcq&?H0{!FNmresOboTY=$dZzH;U8uS_ zvB+)iOXGq&(J*R0Q#6RjzoSJ`G}+MgHH%SM&HWN9d`Zr(9+%{ZhM3q>TvaB>9Im3~9e2>$2QiHNp8h;ekB@Ar&xfBV8(}EU*U=_hZx^xU z*A@QtX~Ojj&*OfaPKlmM;taPYe6~^|ox6vn4f|Bclb5zEd=`nRok{4jCWyIwgd@ZeMKC44yo|;6JAWUpovXwP_y8f zsC$XzHg*k-Rox_7TkeF&vuUM4+&LY={TJ6})6@U457)_1Sce?>Hb&5|ZfoHkWJll2 z7osxV6TjPb;!@>kifzeYSzi;RnI`#YIr|lRmh~Wo>pStzBv0ZoY9@^;EN6?4#NmH; z{q&Ab6!v?}N~`yx@NyG8W*)=aBpVve-7{-bKd@{aJu3H|O)`NF^e$oo4s}a}u3}%Z z8MYm%!-fcr9=zW<#gv|J<_^s>k&wS%2A{U?!s+TjdZ)h+zn`wBx_9UBX~Y$*^Wa$< zbrrfW>oNxJPh>N`t5TYwKFz$%JxRTdpvc+QBjotkTbd>Ou2>5Bp}v%ytxT6X=W-5a zEbDx;Tr3GVE&X?FCu^=dfzG(z9=*>W#M`FcAsW4m#t%|S@k#0XBbmrKs=jb zPo!ZOEj~~?gW4^9m{099c2#>4joqAvJyv$~`wMr8+-+md^LN1i3kijMU$rPB1m*wa z$-8bO1>Jjzw?q2S{OgR4+a0Rxd@OZzW_nH@w^5-SE<9J#|Rn?&c9mTDh%E89y=tOJmZ@o ztvT=ujfyJNba|7geRDINy7dsJS8%@BvombnyJ~62YEN7ZEkWWzIT{d2$W08PKX;c> z$W?8+)m4GTd((wWZTrx5GF{X&J|+2k&6RpjT+Zwsj3$*%&L6zGk*2#Dq4$Jz$r%mq zaE!Vw_(s$t;bx(9{QcWlzRrV2eXqx`982yOil<|R5w!AShNw1p0p4;p>zMddEbE?z zTUJEdX5B$`wKiS*-GMB5IFQeuBuMa6a%jRGrtsn}OUzNml1dAT;QM|t`AK`lwAyFYC~)`w0|(yPc`% zDw1@WJn0YKCM;F)Lh(%>>fc+Qc4;PKUdeFwWWEaTG2{s8bcdyR9l@lt?!uqVyU>rb zKr*Zku>B8%X!#1FkelC-5zKvMc0ufInlDWprh zI*6|(IXE`DKlDdQNVTL9X_Yte;MYYgd0&R_hh!+)I$nI=WJy0KtBEFwmss?Ok37fP zABO3@=+E+=G~BKVPCUExqWKg?F3f~gt1fIqLfo^RI+sh zS>%6b?W^u!sKHpa&dQNW*(IoCY=_gDFJh5j9E7dPq-5Nko`yXT##jEp^Gg#%`C~;W zR~>)=>)|wIpckd>YZAmQ*^@dcjZtUr{JEJ!#?5b!^g}u zbBeIlZ#v}u*7Ci-BE6Iu$8$|9XieK1ntj@s3P+wqic-4ZyJ{P9%_GIXo940;JR9FI z;RS1o^P`Mzr?Ho3Sf(xE?CT|w9u9l=2oe6xLSXq@#I1`IUU2Ra3-P9%kLnRP-kEAM zcaZk9b@X!35%HDcRvfqd#`oJvbl}@Qxa=K9lV_eqzgjISRg$HIshg?GGXsyVInkhX zNoaXi$(qNE!?$#PW}V!{(yzZ|5qc*Ox>%0BWDO?y@EkmuzJXm)U(a6LnnOM-%h`^` 
zepIUQ8TEbVvL}uQ5!`kkGmq^-G53>9kQh+>CGNKS)Ps8Ghmk2~^-m0$%y|`Q`17SN z@x&9lR`jQx<}0|>)0kf5d`125k(i`agkiFeghZa}Zb~_Zan0*!cLwiCT&;xn0UbJ0 zB~Rauo`k}Bb>D;Ug?h@KSsaJHUo&KWi!5ks|)@}sIO{9P2-6(ot2-vs* zY^c^C@sxs2<;C)C3=0yFBxti!8Ru}cS8wui8_U{nzF_m^C(}NG^MO;fX^dMVGl=pb zhnQ5c=eHX;cy1f-znFI)v`On}IBEevWW>f_a5=vDB#&YbTgsxq=hVv34Tq%Qc*g+lvv4 zPGUvgJ2<`$6CW?(y~XRgG;?ejySnu{_H4St{FoK(ju=St%OB$3o}T2v^DIf5Yq4gx zIcI{J)9uu0l%>|dcFcW_YaE_s@SfaRBR9Lcv1%LIR6P*_&i1uWgoHk&};C?&{1=yTfBiUVZO)~ho zlMtC&1IV;X2Xxlrm6tPR8NK3sEi2mMl}d#?FW6BtJt~VXx;kXGD zZ2kg&p6O9He`PZAT}p3j!tmsxC0QOz#llWITwkyQ^V8;$u97YmPW;G@cE5>M*H;M5 zbfm~H`>?F&l{91LY_gLd%0`~kfmq#>YI8e~-ymh9G7g}&*M0P?&qusPKT2vhq#xUx znEzQ_vh6*GLQNbWMOcNel*+?w`mUKetEy8F$KAkUxksEg< zR!`)-nIufH-$?CMXArT7?*(%7>E>Wn3gCNt>6}7l!WkOAHb(S!@lovcGQ)H=P0Ds} z#~s%)VWSM^D29)tUvrcwW=9YjZf#;Cj?5RwA3N{yYef*dmUI|YK2|a=@F0ZcedtI3 ztL(0p7H-}cN6o37Sj2lBnJNh^>$x{wzOYq%_#_`&z7-!# zGa!F%h1Foza6{Ttd>X!Yix6(WcV*`t$ZPB|_U3yt>l|HyZ>!{Kg4#5}Z|`L4_U|`y zxN;W_YpW#ce|V?$=wa;dpMcMCa+iiFbJl*N9@X8_pzD#(h0_{x)c353$Wf9AIH8Hh z=A)^Ua}3r;JQjp*UvaTRog#9tOH*YzD}e96T7E}~<2Of=#nPwHKiP`V3l~}B5FO!e zu|2#@&tdLkd8$6*irS1>)aPLs_2KzzGp|cXzdB#&Zj*t#>H9^UuTHFxduayL#jz;( z(!4wIn3@ww#p5}1vTv^B1kX^FY&|Vpn8ICHK55cIzk2u?52YKcYv4A~isp~mMdTSy zqbN%ZxiueJ8#Jlk@I+YmO~#xmPf{6CkM`aAWI6XU5K8wb5*WYaW z;*BsmH-(y)*&@vIE<3ue0tV5%Kij<@MOE&E=T9$oto9NMZfRt}GTPXkYD!Oe&Nuf$ zJ{#_|A3s)pz~t0Z_>?@DOwVZYzOMy}ax^KWB81kR7)A$wMq#5uGMZ&vDD_M#<}VsX zBO5nUZM_qPQqE7lR_4#upo<84I#$KCFXiF7oOrOQV<(@wHQs6_wVic)8Zx?<)Qe!9D_`cW` z!qUa3S;Xf(P_5Jy1|HspEehr&nHa$?CCnnPjNx?s=U<*l=tIwUdcFNE>6FIW& zJ4!rW8cBn_s?nnP6vangvL_z`h1{tVk@WT!Jj>+h#c^kR4&$BF-4S$uVRv$VcLI6$ za)r!}1Z1p;6d!9$XHmhI_YuY&RL;rae>fd&NJA1 zWCM-8pJC9!6 z5z5TFuxjsm?%>-G1ofwG4`VUr_%)`KH5@y;#?WRJ1>n;Yw!HcX)EyMa&u9=et~vm# z-j%GVXeX|#b(tN%L`#ae?Nx=Rx(4M(fs+aOB|Yd6BqS#+0q_1u$&u2t6Jmn$kKsl(|#bJZ!B|H z-jAN2`%0VNb|Wp7A)Mi+LZSP;C@1C-rt6zfKVcZ@+RTBOi66Zh*^FAR75LM$ia+1< zpv7mKXF_@_?Ttkjl_pm$i ziqxyfkw(exf-l>Tr9(f9=R7y#$_s0%Yto?tITNu_R+FUDti;FF_pvX+2ug~6bf$J1 
z8S1Kv2O^%~?umbx6sjW}v44&?#(L!TXRCO;dM#b+YURF^F2n~Y!TqWyJ+_+ynIDHC z>!m`A9xlUnxf%3n^ahf>qDfQw+{2^2(uxB@4m3Zch`VfWuzstD(!ORp_R-Uc#hzab zt`9pjno)hrRX4kB+hEiSf`K z_z*6GYGAUZ2Ym?8Bvtu7$WY{WpOWl+P%f%yoMiJ^-WymK!3fEP01mW^Z@^aRq z>|i;XAUh2y9s+i3cNVgy`%vM6!*C9Wq%GshaBbfW)a%HR;qdO{l~{=%wcpvpa6V@$ zF(bnfewfc)1G1()=-k7n7}udK{NA_{sV0MH>?j%fRulE8)YuU71S2Tjf}TrF3LD34Z{KCH)`^(x}Pd{>J2 zttXCD31Z5JOQCMB%R6*dWM0X=wcDOyUxF3qv>ZfvN-FlhHYLkqSNah$lzn{jl3g8F zgp;ubR3cL(&Ec`=y))F2lr2Et!3KY~nbWQ3=dkbaI@t35Z}^fBY&Fv%d0Sb^?EOzT zkf=_lt#}Vld<3CmCQKE^kj!>p>Rjn3n4FfOg^oN0i>VTm{;HI+@GFKq@e254SrS+`YrDN^JvO5VR>9TQYU&WU(4%jGIG8nz;YhunT!bcQL*3gmf{VBi=EN z5&f^YLQdO)76!;8Pkt^*I!0o%ayZR9u8yiLpCp-T)9AlhF+%g6cIZUjm7Z*`fX24| zwBeL2*-SE`J+X;2ByTQ_zmqM_O;5mp0Sd&vrbFE)7YcL7(dUb8IJ-iV`d4svUCLa_ zs#}G-J%-Y-@K{Xh*2$b7PsKZ9Px`wslc_}Q7e)p}p;K@+``+6pG+@7nHJ<2M(hWre?#YcH!{TIS|FJLa`*DNciFSH8ZuG-NHAkxc zKaS4BA;o`LyG8#riC6$>?R*^JNlo@4oxo;!cSs}>^ zp~zcSg?`uf_ZPh0p69vm>pIWl_Xnv&o@Pk^IriN*^ylSJ6z3wforyp@T&6}qW;z6%9ehZRikQjUt0g=9A2gz!3f^}8nsVCC(9?1(bE^qyITngN?m<9VO;2P&{czXFKm>`J`D$d-h2jEJ?uT{_>>0Mum!9jtDW_N#&S7 zRt&#)6K^!rAk7^?J-xiC*Ec)i%>g+IxxzED-7}f(Aq~p0Z9>@>B~kxv0$nZqhFM3y z`1>_ z-sAB)byC$&qhB>KlsdOmoHlU*))XnxKg)E=J5hZBdNtnBo#*FSoeXW(K>9;wu49YNm!#2SvW(XDXUBJ;j3(3@gXVE7< zz^SA^NY>ZkT&aDioKVd48=WZFTbsT;alzr2k#ux{CY31OMMvp8LB4G>`l$Gj;Xh>> z;=2}STSqdPS?k11l|9m@pR9$~N|``=ceXYE0{o@+G<1OmQ;2!U{&Sy6tNS*?KdK9R z4wtaWk=(=TY+k-5*9-Svwy?&UFxY$urhiw`5j4YsCS}TzTkd+cr6V1go)&_J7vI4? 
z3P9jC&PRSdgxs$@N43omij)qd3srlOx{7yr;t#+!#umA0+#h(s0*Zb0Xn5sctor&G zV}?kPvFtM>)+P8H?JDZNaVM(@1~f?}M^YD5hMM{1>|d?{8U6C6hu?V*cCI5euHS$Y zW!qu;RG&^>vEr<;33T7o3~d*$qRvK-;s#bo2ZbJ%20ULznSAD&$bF{sf{jUO#y%W8 zmID2IZ^cd7X?Sg{N2|@+us`yZ;IhGx#_X{br@1u2Va_BZH;kvMk0WT${6j*V$9p)* z|A)9y`cj#<3RM50JI(GGC0_U%PxAY_^Z7oX&&P(b#YF*hM#BcNYH4`KIgH~BmEmeR zg`UlvMW^n_(MH+#xRLfs;*#?oUtjJJz2`fjIfc6bNBm`blpQ(y$_Wp;55p&2Wt@pC zWqscqXN3uegqX>-a1|7#m;T6SYD} z{n>lxy3@JL@633eHbozdrOTrYsL^X97F+E^y5|eXb>D=k?`wpv#3?j6RGkLzeTfxY z24JF9B5t;sv%@Oh#I(1d<;HwEWL65_?z}^KlV?phx8`^6O6X0x!TRR$-g-aIXJ~iE z$i_0(v)+KL=f1_z&ga6zgdEh)_NP7#GIUBI3@?i^*pLWgQGdctrr9loP4RBPsV!E5 z{ICnS_*aWGe+*)JiypCB?mODTU1WQ=%2Lakg>1=bZ%VM%#?!XAx?{;z_}vUTDp@E=0d|c+f-$# zF3BcefZAY&PfK<~ci}GF=L}PGn>y~OsY2EG_ZYtF3?hdmC@Z?HE8=W8e{L6(2wPwH1(|(OS*5zMjhRRe|tC^ z+&D(6-j8Tm$#&$Q5Dx9(q#Q7mnoC=Sp~t$g=*3^uybWOuV{W3^=?CVnk`?cD#!4~a@05P8OCLV3O@|$@Wy((n0`C~ zsk|$6pobyKWkbpSW+K9W@cGEZRoHUtAbagFgOXJ^o5JollFqkEH7g2uSKOW~G}P&& zuO-cp&7xs;W9Y%)L!ybr2@F?mM6hHIqBti~xr%=-S~8@|okRa)&-qtG(v_{F;dRE5 zZt(qE4eusbS(V_r26udPQ^mQrb69Fk67F&T@kH)(37-DH?u;3Ztj}ca<13_JbTm-X z|0%K_^X@~IKKtMi2g@%{@jo|TL0@C~JpUJWVGMxXa2;B)ek$FsvZlBEPWqZsP}J`# z=B?er`&o|EZWT*5=Zq-V<2Cl$%;$cR^%%Y@pIMCbqvNSNP*9gZ^(qxmztEkuw=2?C z?$3RF_zmIJz_w)~S zv%-nJDaVn)Zj^XhRt#y%M%XgW>3ewrYiD`E+ITM_EJ|@{js=A}J%gI;Q{-el!Uz;% z-QO|jt#MG8KG~BbqZbMB&95X@h0jo-tBrx9&1kr}9hC)K!EMIbxpmbj4o>3CbzAak z@}#{LVKi*hT2>X30@o&8x@Q+a_k@8|P{T9gKMSyQ>>3Q=+}9U=3s88)2GM3q#0L2Y z?oyYh-aOZnIO~qEdcGX(w;wIWiHC4dp%5~~;dH#*ozl~;2o3j|aAZL%23k#JiGv%V z!*dwv70-q6#mh+5?HUgF^WQ^}kC1!yl;FS974n-3F>a*^VkeZDN4<=#EdhGF=z2$>Jz1iH4&i{?VOAK`b((MEe_7{rL6sJKC}MML7W)a zjSg1j33vE8K!)FUs`6w=Z>BvptV-jon^DwbaE2JFUV$%5Ucke5HLcoFg0&HTbYW#P zD)lr;VZR!QGh)ck$rPRzRy3z>EB5D_;iuMJd~+O2UpMqaOvPKa;bjsk7hS=x75q2J z@9HHRreI>qDYko=J57HfhX&g=^uDi4D-XFayGS+6dsmCc)z2|}i7d^FkfC1TQ)%;7 zGn&$SF$=-gw@ zuPJ_iuoIVR#?#<0iga-3LZqv2X3I^L#S4~MlD=~0%rf{hzMiQT8a2>CE%ktX$VzYG$WSw@vM&K+!zdLx`vp0=n%5-97 zI2(S7_q3Z*q@t!L<*jzYtQoJ788DshZFzy9y%L%j;YOF0cJsRf=g)sG!s>tWsJxhq 
ztoNI6z=LPu>pI}ZyFYD*??Wcx27KKzu`ako80+gx#TR)$p}i*yQM`<7)2YmQyd{lo z8%p}qZX$VypeK^Hl@85~*m;a&Xp*J3fPo0y>cUw%& zXC9(NZ8xHg9O$I+ejMI86$3iviL%8Daddz&=a}$*ROJPs`i&8tq!Hq~NPb7CI*Hte zV>riP1l>7eCl*a=g)prJ_rtHVXx`%wouo*=XWNP2ugxICf>tEHc!Tgp+ng<7?dW3E+Mdexp%HJ(RmWfx`y8pDvt=nWeb*?XY@XT<=-P71Tke@9y_QR<7 zg77qT99_F@K*xl5)r3OMG0fO3OR>{^Ixz zWUgyv{NPM3asB8^pby+ETUpk3Jt{7##p3xvQo}dFXdUiFaxuzO^Jf+`r_W?gTU^DO z3OlxJ$3bt`rXm>5U*#R&nFn)iBl3B+l)2an?A75=s!8|;gXX_zEmvio_aziPRF*ZZ zuVZoly=6U`m*8&xXv*4p5qqxnriHS~^idSpiiN?b815{66fu}APfo%5jz6gR<3nm! z`1fO^D+O+KqAEW3Kd{<~X3mo$_xw;~lzqcPmn`g@Wludanz6R+JUU!HA#mIcSfxsl zIe370U9CU0hzjDh=13Nt^AHze2jIxEel%;$V2a%H4rFIZ7tR!;-eM|zt{c$5B<}^eswwx9s4yfH3EIf7{g~8j|8(p-O0#Snfh0@NWNIs;{4t} z_|eE2jcLoNc;zol4!VG$L$jn&Eq=oOIsE;?XH!?E>Cze=}@Z2As z|56tA@j0j?XI77VsZ0lD&B@C?lk6%(s5&@W{8_|%$0__7j0BiPoWhH_VKgJY19KO4 zVewLV&YtG`g}E!>n`%v+mogA?BLMELoAG$&D9Rdek-3{K6Sh8EiOVx;V5r19=JyJr zquUcUqh!%XO;+f?r9XZS_>ApC4C%4WWVXm89nQ6%khkbOoMrlv^|$vp(pLrS^o+eM%oU_qxvIo{ZwSMlVAF*#o-49@`W$Wl46t{Q5$#*hn=?6<(daAdnW?oI<@~(~=gUTH zNAfT%$#9`aQ(3Azn9Z{rg-qVWl>Ss&NpGgsNnA_waCEh;ppQ+6Yv@a#{O+^Wqrzx# zq%Y0=^c`P~lu2*O*NO??j27AM67^9@EW1kw9U~;L|2LMBXNjn-<+Er0n@r>RNU5NGyP z@9RQAi{E4Jp1b&dwgZ2*KEh$28g%rt6$3a+Pq$^8F#LLqu zL;Oz@Qn4t;9p@QnmhKV1c1}k}QFqGP!auLVi-N?96WjKNiGR=RM*LI${2by>BQFqH z{;m=V@A7`Ht~_18&GR5Vz9Y=%JG8nfikTmlQkBpI*Gu=Ye%&HT)$LV6>VD2xwmySt zuDZ1O+dDRGNep?YMo@)yFS-)Mxm3r_NLEjH$i0c_;-qWF(0XGmQ*)&9n~k4B^zyq-qi455Oa;bOGj zBsjiPBK4@XWR`jt79+;-Zb&nN-!)>*yB=iH5Jj)zCt>;4K6EB!KbVRSCTv@P(rcrs zHtH$c?W*Q}^$+(@&VPw>nVkR0J1GAB9AMp2z%=dn9jlmofKTyz|1mi__gRZg)Q!ZY zOaEcj)guUm7HzKN+~7SEXkm&u$?C>Zy|N8ycf{k?uWfj{M}};!Y{9W{{M^eu#J5uQ z=m^hsMojcU+?ky?n>taLX)B>#%PUa*Gl{nFnc%%qZK&_3NOP5W2BOb#$iCUio?Pd% z^ntx8dz3YnI`E8zq6)S4y^XB_#=?IulX<^>I4!8srZEGSVEeX(64nwS?l!t0$!dxa z>=TY)&fhhXVUjZ#xxtbiKl&+g%k^REz~0VOLOXR;vDa-_rmJ9kC<|FDQ9q+b2q?0Bo|)AZ|*eAqI#^!El27$1M%<* zzPFrcLI)1dVcRd?L0;S^c9EZ@Ke!OZKYNYUzwPPFqg@!XX9*(0jmY_!6|L+Hp_$S@ ztSf0BK01Aek>yaKwqvG{ylffuTD%JzW^19MpDt&o-o%a83_R|8MJ(p|b%pKv^hv5l 
z#Ra{@AzeKws>Md!{`e+>9&LuPCCFw`D5>h0iUzN~!?AxSzt_I0h}5`;cRKRqY7{Nn zX~fg>MampedKNPBx{?^}06JmojDgKtu)fBMlC$&?*EyaBE}2XJ(cz6PME=c2KO(_p$&!h zRGR7y^N)Gh->)}W99ayXEn&ja{1n>tsXzA>HelblwXpLSu=1CM=)F9Iw##pa^_L>fmT${ma^bx8H`ysmOKu~ z(4ss;>-}OC+(#P= zMF;e0rgaqhFO%ne??5^?=0C{rJLTotVPt$N8Kd~${Yc&|T)l7@5!)+ZTB;4t8T=S? zR)JPHm+^n+OlaK8!^`fnl$yC!%$fEBkIL;t+5b%7m{fxUj(U8)Zbn*b`qM0^ycQN2}?{Z5| zE2@mo<=OvKIBR#i9GtTj%ec#=GeDjuo%kzsl*rJs?KWa4d&~M-r0e-l zg@lAwl%LR|Z6!w~VO5-WZKg&;Cyf%@2d*X-(~ONqpYi&n3^vq!mF|``!nE#ElxQoE zN0BGOmE!2>rj?XnFH5o!7tz@9N!T5q4w=yH;xzkEc30v?QFVH3#n3Tib2}NEx343a z4Smu3PY81e@StO(3IyF3b=YPZCJm0~J+nf0GH@=0!6O$sxqBP!c1xybmvTiLM}PcG zSEN7PPoPsa3mMX3Bv#dPj*uA*Ine@3nFVy2pFwVC_9vZDVOY$c7eYiBI%ZF#bY&~_ zT-C_*M&+UN%QvX3?Mr8kx5Hp_7o90(vNGNC@`C!VK4e@QODj&eayEQ4&Mz!M^<5jP^-n|o7YP-|r;&-dH%;pH z2b1ErW5|y4c(b9qI9n-#Lbvd}xbp(q>{Wz}*|#u1RD*sEZ$WON2%SJpG+p6)JkC|! zkeW^>Ry|}}Rpdy|xE^PXo25dy2HaHmZ~Un$?d!70^EJ}r0v?A^8!QE`b3xr#R zors?lEZW#sq2F0=XwUYgdp@3|u(?JE+x-zQTJ*^@Z4WbD(39eK@XlDEy4WSZl8jxd z5pTxx5&JX{`!ZT+-Q$TjgC9d*tqcD~2ElgaB9hi6(LQ~iTbXqbYa5KY_c{^E6V`}h z4jyLL9GyuxdW03}Or~QqcH-vY6pGCohTFF{OX}szgcb9z2*c7pLjBh!A;{<)da5w) z+ZHg;0i3IzMTS^QPj2rMuaAsCkHA(aUd^V9KlWq9Bu~y860!Jq9UhcXI zI|zjdW@H-gf{3lQbYS*c+8e+*rST2$`5uTx_NS1TXC)rG=uhh0g;z0q3FrSjMsoIX zBu>$wZbd3|-R}^;5BswfDZRMsK%L63q`}^MISp1-q$YcLs@>%%(OYN-ecvZeR}#N^mAC&M7(9 zUXEv_X|U2SMp&E*IVWxwho*9mNPio0?HV$jc&_Le3? 
zu+K%Wjy5repBKgDW>j!Sor1p%7Mpu^qr$df;@l}85tL{E)iW{9 zz`O69bwBRgSaSRG1(ip2X!4Q{bTgbu8P{T<>SIkwV^*Oy(Hb=mSHP<{meR9zaK23g zCj7y)b3iNpyy5dV$#xX`D+$NvPojCd(j?XYhT%rA6*;Vzr5`Panf!aA!Mfy~k7f&ai4X)$ME`6ezT``SWuWmG}Iy&I`clcPNw3$Um>h&|oaozI5kC_15W`1iQBVmRCHA1fq`^QV6!73umFJO2IMAU!$ThgA1Y5mguGqxSwS7J5CKU3Lki z)N}jLX6Qi{Mhay9I0*0hE=IO+N4k24HmO`+fVsNLwBW~R8vnfoV!jex8OW%r#{`(@ zgwYl4r!dbe#+<2VU@qeb9sYc|-{S?2>O8?yKSOqnGce-9OsOdDjObCKNw5AJFTRej z#nyuR7@61?dD(_EXP-59*1U%4MeZPzDTDIK(>UqxL>>Ek(xz_uV*i5E!pU1x@rL^z zGJa1My5E>X<=YL|@=ng!D(KC^V+`okoXzmbnT6DDGMCpq34r%M&eM?9qEnsz;s%R0 z+}|`$eD|i3&p#Ow8%EO81%9OAB_}EjUkeP`g_q$tuc|=bme@?iD6Z=kCcaWQu+6 zCa~XEyvfvmx1=zhpVoJ6;`5SCR3EQ~zdpGv=&u94-gQ{|NKuy7cf@(?mR`o|vEKB! z?Ig-PZD{xOEmZh;4d0C~7uWH9uG|bKYFKd$kE2SVTsf3Fy#%=T>qY%@?!#7f4f(yE z4qL^4aGyCEw)RF?eO&{aGRM#$&VC-U-XC|0@{syWhOFc4Y0ko(s2^*=Vsgu*kMfNX z@OnPN8qDcZk`~=+oyqF%?L@zgI+Xgmke};8`+4W-r-db(yjh#~0Ar|jl|PM52*n`I z<62+pLOxNeInyYJ{MTjDEq*3=aPtc^$ED+&^bAZ&M8VT6jGpNW81Z`r6+PLB^XvtF z7%0)+^)Zn0%6RmrYhg{FJ&qM%R1kzCS(5yeX6=vXER&6lzVgBFv_H+9a$ z;5>#Eovi9C@4Wt3EyPDgAhJ4_XOGWHt_>^1--CTOA8{0YbB5EPeoAyQdJdAGN8xL@y?3m>Jn84f zq0+b=&zr_mMu-gk;&V54DpJ_+CK3ls_PVY*b!Jb=+hH}Tya>V0-cwQ6NkE*LDXzSz@4#B{G>r86&55NdIvG)+B7-51SyvM z|A%|#wPn?4)hkDu82&~0(-w$~t$ar8??U+_BPdDLR5He}46>agn8^WaI)3&TA}_|l zGwzERv?>8JEYzrWVlS%d=O?av`xOHUc8Kj==Q+E2F$OI6qr;`{^yYDw(3!_m1YTAWGe?mFWl@yunu zws?A9j?^UeBhoroNDV5AaOfiEb)LJ3=#zuV$|Q@PsjR20zH3Aavlv`FWJe>rZQ|dt zYP6VmP|CM^_%T3-Hr9T|rMfiwNl|z#cB9r6fmm8`iJkLO#^NU<=+-qOY1XA|Y3gRN6{5vD1!AD-s9NxGU?$sC3Spd-0Zai(M<3Bm~id%H24n z=1rAqoPCz4hao{bvC&Og>U@H;a1MDwk8_C}?E^^v-8Xc6lB4Peqv+088w^*SNOH%o z;_~2aXt~S%Jr>*vl`)v)Bmzv@-eOa&J`3UdEQOxBJZp1M{Ay`TJp*`#XV@pUa?)k= zIAMYCQQA}&VM7XKoj9tdPD@Nqq9gte;&=C>?y*WV%+`S1H^d1InH%uR(}+R~p9%j( zS;1a!A{~o*iLnW%yf0K3Q$(L72&_m!zScMK%ZxA_c*FBp;o6jKG*0X{u?BIY$BDlN zRl{DzAGd=ilh*zK+SoNr40(JVBh_^2*BBYm)X1D}-Z!VjTr2V5l~weZpB76bZ8~{PvrUMn3ov+x}T_hHv=t?6UE1QMQrSv z;gmG?mUQZL56OYyJP$A+3w29e5r6j(>o#t@V7$CTvT+Lcm&Tov8cw~6Rw>^XaK~VY zvm@=v*-3_vlE^h3^)hWT{cEBG3FTSV1nW3$U7V 
zd>cCF;nGkCocQF!^WszKb-4|``}yHF?_$aBQXt*Ywsdq$0nBS%rS46`$!V%SnmyKH z8GZYsr?@$zCtYz+AgRefc30hqtUpFmqN^cA|6Yqnjm6wu zr$&a$_rZUQx;UnIDXkmYhaLoW;BUb~d^>am$A7taYi{$TeYQC;nZ1I-H*Cd^-c7LL z-ImWIdB&&V0@kd*&)nVr*IVyHVwpR(ch#|iE1KjXZA8t|{ZeTqcWwU!4VUozvu+gn z9KOSBeXPU-a!({3nSRVfeuH}Bp%lHwjMPJ%3dtWl~XSzJ+^?gg4rKM_2==CWkhal9=uJ(+XLLzLC$X zGIp@)(x=!z`!%dLd5GJ$%%fzrX84@1hIi;L>9B)`1m*lctSRFFzHt7B`+8rvriDY^+%k;MdVVp@#-s|jA?x9E z;~olCbm_%(DW+#UWgnZXSwr4N;bFr7+$iL(GY?ZL-Jy(uSC=Ab&KnpQRzo(;g6hY% zLFQ{NQ)< z>Ltm1;zz>p1K2ueE#3UI2*-KPR8F=#CCD1k`i$pTJgg@II5%QT7iY`%TShnJQ-C;xH+gV zZzLv#n0|C6T9FgzH$>OAQ`~yoEKx5?ghj;o6+WKo|7yxM(~?d=F#Cy$E)lxxVsv4 zD^RBD=2BK(5DwEj&FGrN(VMlZ&&=Ow4ZaB?>yno-kN*N z=)9q*9;75#uIR)2ASIj`Kb?JG6&P{Zh+aK15dzH)c$vvYQGLvJbX@<8=k*&UeQu1T z9JdY3FT@hZnt&Hq3CV{hlgFK0+~2H68Mpa<>Gc^ljl26o)K5wKU?3?;`k>y4Gch^; zXSLA-bdPqU`auI}-}}ROmTE_%dz?UZR5Wy4Z$Zq;Kx2d%sTlI?S+^_D9PkXqrSGvb z^a$>!Xo;3OL;`1=-`RPPDNMS6FVq_e7Yu1S&)_LMzk?M4##Hq;56gCMMD0lvs!}zl zJuZ9)8hDiztlEt}MePX107=XKO@jTZh2#{x2V*={G2fN@-(T)Pk(xRF-LDom^S;QD z4xWF`RHK7u3xwAV#$?)JC~n#E7#ciVJ?dTn=QoX}57ye^l=&Zde&RR&jepO+Yuv)! 
zbDfy^DO7yaa~35k$Wy)h15BOUQ`q;wn|{RGVy5e1OpLapptl1t@&S^XeE_B%Kgu++0H+PWPvTtminFv>H#}U&CSFAaVa% zKYAFo9c`EAQlQ=?e&!WXRAET+e6FJUssfAu<+F4Pb&69{rt-o9Wb=K%9f>-%%XZ*U zuLPmIaUj^zAllHXL^n1EA#GzJn;$ew^r~Agj9sUIy5|QF-L+UUF^!)&e%g>tP6l%k zf3fR@qqwI;mQJYmp;FD~l21#=kvs3U_8wG(I@_%B#IMoJy?Y3)ntc#OVUCn>{U7?i z{lqLcEkla-RN)tQGMO|bp=PQYy=Waqiu@Tgl(Q5Dc^|N8+6G261A3;0xAfl&K zV)amcxZfK_&3tZn02ko7wFi>g&%@VNm4x3pqHMYx>FgXOCfb|8*6K6fTRvx+XN_rQ z+hB^ddWS1_v?wp87)8S`BB-Yw)lawOIm-h=toa-1X7MVL+_XqVrHAzTmGSgtx-s;2 z-9d!BhUDrN&YXAIit%%H!&-{F*oA#t4)N#vUm-AI1>WgriR(3kn4U7tMK*Q{vuSO)6k(rUq54%X8vp4hyk1no`HMP= z;?(I@yc|9Ln?>_;PO#Z?wdrrlD{Mb6E8MiU#fa}?$UUMPt$vh@C;Rf*jvSuzXENf< z9sRI()>a$~elDH)^$_2Wn-Mqiu%-GFX_L`#8nWdN)_T~`cZ-eu%o9PaleNVY!!E+_ z;Xui)O~$Mx&X*!W&S2sy4@x*BPcxm>@yaC-^=@fWeS0mcEQ&x@xDs7kFqYm8P^IZ+ zx-@pm(Q>uN!D!5mBmeamA*l12chPyAToVN2fBy7#U=!arJ;KnZM$mtjkDZpiXk6cI z;;3a!kUi=qYNXY$vR&U0`>rR3-qfYMGuG7p=`Cj28<5y}7HrmDBr7=4rTNz6^osZ0 zTa<;dHFq%JU~iI~2%tq9M^MVgcBXx<8h)EsNtPw_r+;S)vEq9+Jn&YO;hAU+CQp8j zS`?&vLP!(-K;z+Z@l@Mm6cAk5E6sGovf%`)Y|EWN`t6j0JFo|C0B-7;Msub(81v6K> ziWT$cL4U$pF-#?&&GK-dA=j?4+nvE=IBhrPDQ_gLWq$Devp}kMa)7VsM`RYO)4<#FylVe2^6wl8(06RB4;@FMmsiRF8F!pO$EkZ zKE(d{Zf8NSCs5?RdN!l~U|Mpr5)B7q*f5)1{A&9Pvn_jJ)Zc_gx$Dun{P8q2#+wdI zT}Fz}?P=ZYAPhOenbYA`w5r(`o$Ejajn8nT*ATe)TtE-58&~e+nv{kT zsB)(Kw!2R-L7^1?z1O7ig(|d2{UBT?$FQ1rhGg5ox!TRULA__vrHOyx*CtDol;woZ z#5mk-38TQfy~zH-c%*$#W*=vh_^afzbluH&?524JKCK(YW=J1nTe2lN7=*ADKdYEa z%0!y{r3XDI(xb02i@ZI5O{MpN^F_Tr#c1vDWYwBeBu%rYP}lL3kRQ#N+mruckV*ji zH*!1He65lm%+x1)+wm~I*OPuI@s8HR-$*;rjb=^>pr`M=a2R29XL$prx@|^IQaw&D z)WR7)pPa7v2UvFrU2+b1bp0NRerZs}(w*YEp*^T6){LT+)$wxB17!PbW!78tY5rRY zP1X4e3BQBi-nbV&$L~SEy+0Yw>P>47^`pkn50Y+QFTl@UlQd0Fc-svg#pHgiA{qKedav}4c=MJ)Ka5*gzik8Zvf?p)% zbn8W;**9p|y2EtUPB=U-kjz^S$}Qdk9quf7Imaho7dgYr2D& z=7!IlOL#=EP&kQH*{8I?8abP z_NG6b2|0)JV|&m!p6eSka3pT&X5no54n)p0B7={OaCH}Otp5l8jy{WP!zSbC#yDZ6 zw+mTyJrR~PZf1e2ZXx@5IjeN+M?niEw14Iu-0;z(jU}&5nA2iQi<1w*|KD2d`;ji9e=vFpT2$k# zMvKZ7#M=+l=vJ+lINq!rgC1_jye$*RRyv$&)~kvmf634fuTIW=9mk$et-(8s_i!P5 
z@zaJ`G&kubuxSZ`>!WR`bK|X<5j$HK*!EPw4Jtq^227clk3R{dFxirgj$s z4BvnsYQ#3{<@nfWMJ-2lQ4%zk+U~DI@!2IbOM4@dcJ^Zx<}*mX-9>bX`GxV_nxrn) zCD?Y%fd>4Mr~gjs(WyI`blfq7jvZJpsxB|a*T(-~l)eD%NjY$68%Vm({~%_z9t{{N zPmOQGXk^u7JePBz312h$e5*V5uRe^?C4PK2Wen%^u`I|T?f*MTp4xg;=~fG83k~er zd5Yb4>MjHi*TjZy4S4!pi^|QCaYk&vgaM;5)$P_}2*O7%2GJ6mjM+ zcixXzrnU-oNoOsoo8e6sM;@$GMxI*SiUsw*m3VSTp9-3Gvxm{+XaxgRU;2%r z3v%@Kl)1#<0;7EvPW)DTodqAygmMrtTr+@HYQDo`7Xw;tp+ZMBEztOpyGA<|r6%5t zWO6s*jPDLFA~){b801c)=L>Aa-%{AFUWSUi!K9hJ4SstA z@#RpO_|i5W)3{&UeW4b0v%4>K%`;&R&rk zmNcRDQ5c0j`pif2o1B{8vTC>>k+r61f_Sw!5hn&1(Vi7b)I0fsV7uLjdDMcSHr zWLTF-=ZO92KHn8Zb#BChqfQjndjR$*@EmN!Y+55x1@o+emGQTC9a?S0=6L&Dopr3SL$uey*Zp! z)7_lhYB$sIk5g%$SB`kDWe=3%DQ-yBD$?RoZ4 z&4a=_k71d96O&imj-!Rc$^Bgg(`nnxVp@-2>gy-yv$!Ycx`^1s`QY&%rcrqR0~L>I zJ@Ak}vo{XvNs~5iWzUkrkiPjj)^bj*$8t5Yvf~-36?d72vJp+ui=_)S`qbH(1#jMY zU2OW2pJmdK6xJX-*&ahfHF}alLpzLB1Cb`bAKUi6lGyXS`7Wn)>^EOYsre@mb@LmP zOBE=iQ=8UYEW-0oY3x<6f#kT`io&1K|8aC4ZaKd1A8%+68d9kw?NX_<=(*l!Nyy45 zD-wn5>`^2#LZx9vB{LDJPf0!Z9a-5DA{AMYC@TrS>-+ly90$j9yYK5d&-eTFf`?iq zn<(m1SI{H)raB5)e3o0s{k#KgoAJPO4g7Npn8rvqF=^y^iM%zj%|CBr-r~W++nJnA z%$YEe$$i)d1K#5@3#Mx)UgK9#D<Yon ze;x-lhQlW!88cRKuQt!DJu<7q)%!Q`ecfBUPCtjx$WR1n$cRH`cu?q}o}%%pB9^2i zBKqemwrYe4^~v<6A7}Zysi!d|PN_iorzx<0Voa}iN4LDmkLB$j%@*ZlLw~gzsXN7$ zP1+_Q%6h;St-g=HXSU!_fE9(9A4bBGDNst-EzWolgf1f^s^AQu`YUSU979cNK0HwD zzqtb9?qsOyO{0F}`JO&hDs<$2!*9b5Ec!E+~Y_&jA-6(?#dk3*8>$t}=?*sCUJY4=^8FY zr$JNOfh;a9Alc`pbm!DB+;-f6a2d{kZl1=>!}vM8Y$ujxY@))WY3O&M1InZSVo!Px zO7FT3^0>@olls$8R*_Ar<=DJk4si{I^|4b7%JwAdkqx~pTnnq(zdeHtI4MIQm5@`$zoOTfjZ(LJ2Zdp3v-z`Ef>Z9jY5CXQi2BHVnpSh^!&&wJ zcZh%VzgS*h{Sfx2;>07z!jb#Tm5vYSfz&1A=@Lu9cGD%aPkR#NxNE~Bw1frRH4x=y z|H9e_`-BZYPvFgYAM*P68zD~}NxZ=O@BGj8_%Kck822B{+@9mh*Nx<@dj>ZHe98V$ z6OM$bk^B)wT04z*m92WCA=Hxcx9^6S?+NqgDX959iV7DRfx6#hBc^UbV8?x^8}_4r zi>f%U%mq*7YMIGyH@e|rf?%~yq}))VeL1PjuDdfg(|QG&J`>@?-L}tqb05y*9^@V^Pw&e5qTMG6Uiyl{$)C!W9fiMr#w&mpE^%{^C1$9q>={Kt%(OH85AFqvE$l_>6ofIpHf zp}R~PYFAF6$9%4G=fV#BFv@2qH=2l3!!?DAOYB+gyDM1vxJI~@!}B4Wi?v`}u(!#h 
z0G4GLMlDO;VCSFbDCJzAW-)-W#@=K;PFCoPW$-cBf&51kX?t-IRAuyNxVk(III*6s z$eD}V+qX-97ZcreABTjLdq@fjC68~Hp*h-{)VS-WGA#$+4rox|>Z6ExWP*lKX&4u} z69F@fDRRYkIKJl&$M1J>r&7RRnP_-?{43aZ4W+_C}g?El{VyODbSsHw8nTjOa>^9qs)%ndaQCVXE=R(8#-{BWpb+Mgv==ef5^m>po2| z)jSNx)7HEvl?V6i#R!gEAg23nMz3L+#WCbdoCgq&R~td(FCivu0rPrbBdi8gjN1~ zkjvdP%Xa8u)bQc7~_g`%uTL+f410xwNY# z3SE1qWApo2_-3EQHi=$PebhsYu;zWhFXd$w{!(n8>OlXyx2!X^pwkl*X=c$JN|fI& z>YUsS$6GPR-G?76u+^MByPV+RgG&zU3BMYa320zV0k

8_D17ES@Pzxp;`@_~) zABN<=U-&ZcEC%sigxu*~L~E*9tC=&!*36*Ei7u2@Ksfw99>;H4Q026l*z43=aH@@_ zI}2S%VZ?jhCtD5gr2Ft4=`7AP9!KlH9N>PO6%_nV#4{Nws-_!r7pe-qi4?KsR3VF% z*XH?a?#sEGh$FKHAfZEn%oZzBPiIr%^Ltk`-wUGoi7J%hIu)<8-I;>p2yxS+kJ6E2 zPO=cMLbNS^w3s8&7;_`2ubK|kB3ONUm8V!l>Nm^ddD%& zOO~1TQ)FRnBPnP60XS#&r)0igSY&R1FA?itQnXW$;rk$^!!uyXGtX{2eJSf*Gn{O6 zsD{t8WJb(D!O39q8YDvb?Q!(q#dBLz&9LLZaGpkyrIfbo*!fd|&tv#}AXbx9l5)gp zj-Ak$G+3_jj{&RS$_!@U(1ooyVzZ6=Ct}qj!^em zSukz5j$3YebjmnXsuw?t!Y{mI>Uj?^-+Mc2>uo@#P8qQE+m5w~-^8Pp%kX@G27US> zN5*;@;su_8O%Cx8TX`=(ptdjCmJcUpKb+qs>jhPlyT}exp-1lOf<`R&oBvj)d;2Gf zvI}CUo^@bq<68vG5!s26dxWza&Cu|*1drG2Q0HJP?puna9Hlj+Fh-Sv51qhg^Y20r zkF7{`*&|N9W6s7-7))Q?v{=xBv1F~b7iR|VphKbr;^QNdPE!fh@(#?$9v`tmHc{H3 zR0wf}7sa2+!zXV$T2PS4`KK{-W@?=1`h6(IC}rAIScAq7K84ClyKZ{GJ*_d(c#p&sQagn_{bfoTmA7_RrSx>r zRon`DhNzx~R6g}9wEk5>afmt`^>nCivnq*^yXfS0B{cE(zxbvV19bLE)pt2y_s_}H z7SfF+!pUcrC>~5r?gp17I*)R^`LFx-{4*E zM7oCkqz>JKDWa~sc)LIEv_@W#ctuMkHPd{kpYl1LQ{${1?!RdEbH?8Y?k@(Wg8@)7?_Z^W1b9HWfzHyGog7m3xbZYU}B@v=N63YVcru8&lDJD9G+%*cN>kc4Zn=D$@sJ z6F7q|IfiE=ITvbm3M{SRaz-~GezpP5Me`%geZ z`oHp;Z>I!C9|XDhPUz0rBqW<;V8=rr8u?!_l5{<&=k8r}-E$Mw#H5K)#^Z3`=pQa@ z-9z4vr?6$G7Y&^Kgu8Q8XhiFKxGs*RbB$pr=DydSClX=ahij*LT9_fbeX07lDGTbenJMK8~(P|rvD^fm3GG?TLg#lm@FKcjN+4GY`9d#<#2SrE-; z85sG5pWC^oXvX+rmSCBR_P3o<*Dzh`Ki?8}=Bv@?Pu|pTbUVIQE6_iOVWe}mKjwD@ z(kOm+s2Grd?uFM8KEMcPbG@jss|!26@xDVpTZ~)FJ=foRa$f8SG4HqnB~)5a<_kqw z9Vs*sJgex>5b;qJ;q+&9|jD=er~u4qrn=fG#Pw zrwUu$nwVaB1c{H|;?jg8l5X6a@W3bz_v7}#>UXo~G$$5zgShj!TAieKhKmRHcO(6u z0phQ?U)URBh@vk%FT;I0S?BD;9g3H+rKcwOz0DL>EbdERM{?(r-b_)>W+lbh$&lBh zU(lbgjm#B{!kb3p@39MT&DEuW=$T?XVQdogcf|&5)fqZ@&o{;v$rK`?7zJUzbOm&=ERRJ5kg-R+^q#iU*4wsdDXQ zEafiUK}Yt|F1_`nm9j@nO*O?VGd()+;5>wYY>0Z}$@y<9ULET}nm$)hw{Zde@f-;~ z-@eqGkOWU#XIyYw3$uazIXW^1>XlZw)OZRWyWXOj(4aWwApJ4q7n zh$Zv$U3_qF8gy(e3zCe7iF`FGwN)dYS85^{D0PXQ`rb{m;Lf*vyo(~Plx8NeW-903A8y19>CUz9y{S9*# z=iv2V1(f*Ni;>4xu}o*XG9Nv8ifC6w!{g&n*(Fc2u4`cJ+xP6{x6yRuuN-9$;jG}s zHxi|X+>hBT!NulXtj%@;JoC0fcOd8W&v*ulGn!PatU@7)Elk5b5=wn{Nv4f=r=cEW 
zF{kw{7O0G)_f_R!^3J4{;7&`X&qtJ}J{?_F0t3l>er_y6+n5xL>SjgbKc2$ooN6TQ z`hwT3=lJ<(H6~XL6g+eX5f;jdR%!3q#o^`n&)*EAx0=x({@pLH{12axT2jXJqsT~g zgyS7!dfeti@wbjif?WzF?;plO_53%ev}6rl>BuPc>SNY?p$+RRC!q0BFYb9eg>_xC zIXk38nDxRPF`Ii*ul3sGTcaVm_~_8`4Vq%DN&_}MEJNtmDU@X+q2^zAg%SPRaklIu z-i+EQku5Jkajhb0O)C~49I009FkwWECqmyX!>+%kJpaRez9v@G zH+Cxh{U}GXc~5vv!BJ`Tn%DfkJG!ky#S_IqzRSc2}CEJ%H-8a3S3<1@rvBuO4Y#+|c7g`Zc@xKxSs^uDmL z_vLuB+MX7K%h0}94cfm=g?_yrPj?1{W8yjsx;k+eVy6FO3JGUmG{uK5`F5~BkAtKu zpTr^ocQJoR4;r_z4H1^QIR9e{Tc70y4nOWfGL!TDWB-%%aF0b;?i0NE@E!B+ zI?y4lR&^#1&4x^to*=}ULxivA&ta;d}QKPTYjZ%Y?>9&osEFIxKdEvz^b zf8~099<6=Ak}XG){lj=n-MX4i{;ELyhEE7R-HfJ@M)Zj1T?}V+!!T1zx_;1x+CIn7 zA-ns0KWa{S4%eajOH!_1ACJ4Cwv?mr7xT9AS!kXE-mdCL(|%oHV{@$7S_=(&9QBF~ znkzyfk!PgSpR;z$X>`@xmx4)=5=>R;mAjTCnz_)VOS7=*=t&lp7Kgx5lM#~aOF{eI zVaiSg`V#UB+UzsCEgb-Zkk0b0YuxBbj58i@mL+fAoj=k`icRVLNq%&v@E|uB3;#2w zeTxp^!GocwK3oKAb_UA*2a@Sxo&_7<2>AWBaSopsD~e&ex!VqGhEPXCupea7rc~^`6f3?DuV?HW|ofMN^lwskz1{n0J z#7}wtPO9B5Y%+GDCqc&aa_crG9J-6yN-ns|J>d7Jd(u^{0-U{MNC^ffuupvgLIZWU z>(P@QRCC@~_IB28uoq4bzF=gBsw7tHu=M+jrPLDNfo>&R5m2W~N#~EjsVxzjca8{J zn)A8e+lapDY0#7d-lFZf?qsswRct8k#Qy&-!XjflZEN+Ty9ZRnlCTCGoY;=|URzjE zMJAr}UaG@*194f}eE#n4MDs6oGMj$OTmOzD9gWw*<=PognPyA%gTi3L8AZD1!|4!b zHMLlM#>}YG<%3h-VzE)AX#9K|ytp@X{{wd{xTVc*_V+~AMprD_$lqTF^4QrlIqdkZ zDbD-Z0ONjc<=zR^e1^fFHSReYIIs^%cPG%l>R{Tqh{R^zyYL+N6RmyEv8y#l&_Yg> z!1Mp#w*JPTGb(g>R{&k}3`9e?Gu4;4yF(#4nRs zozqa#S(6O8fAi^I#Vt&^@d>v+{lQ>$MOuDsC$>j7GSdcA5@H6@y}s7i*UXtkE4dfQ zhu;OK8VUNjtN3iqhK}j{z>;vz9~-ilyLRldK z65EfSG0uOk6LII8DFsKBU~1L^B)qqx?w1{CC@YgJFp^{AwKn3(^q;tO{;RZ6bvPMF z8`y%{3yqi^18Fk?9q} zfKmQfp_q;xnw--Z5`&w)_2|OiVA^?4fxd@a#aWZD-v9i&AZQH|S+Cg`EpJBa^ZMhU zLjY~vyb?ioqey#J61Mp5U{fk1Na=~J7{j@?zJ`qbym}p1;Yzw$3N)jCU-D|-LLNUD zZM2IJ+rm2jKN~f#7xzZ}IE&}J+qpxd18z^X>1T2aLX*dkdBq?sYO|tsR_EYS?Tll4 zDly%c`)-wt@MiyC$L;CZ}L;2NHL594BmMmz7&0LgHj870H0dB(<)8$ePjd7@GK|)b;cVt~ zNAmBZA^J50BZ0eSHhJtt!K*=--FgJw4pd^jh9ljc%Gv%W|6x$ZH}q^Q#MGUSS@p4a zAtuw68Xs~~_!(uSY`TU6BNpRpfGNpxpNJwA;NpdTR5U6BCh?nKf6#?orn}S4t>Y+N 
zcaG%I=+#JbQl-`#0hC?DC~j6K&u*9Fj>!moE$%^MCWP}ntsTNA*@}tbQ<0TqMk)Di zsNOS4cyL~c{(JRUI9%KWpdaFz7CU?b)m{;GxI+93J)zlV%wV` z!nKRhl<~6>I(*hS*-~Ek9=%uiHOmuWF6S|pXI(^U%pdu|fuzX$FkPKdv~UA=SQptaaYrJHEmjd#k9aXQF zJD*`}A*iSiXUA}!yP14Ejo?1W+gl*Ep5PvWS!VpZI-8W_N};^NmFSWTdEV$wk`MP# zSwEFFb~8a%V_%YT-ay9hHg;!a6^6$9QNmeWd&{~Aq|3`@M; zNM5P9)1;|2?9J|0eAVwoYQrx`a-*kWc=vh~@O@{5cTYO(tUwd)2hpdZzBFl?g#5Sb z(h0+G%v+s}>wzyZf95dkdBVM4hU2L%R-3fPw&3?dQw)bY%I_3P9^MT`-({04pxVvfs;XXm5lADLN0vdYkdIWq(gnXgrPhAv%&fW5;8| zWEUEpu1E`6E*7pj!`2Rw5ffG#u_-l^g(-Y?vL)~%3**`I^}NFs^JyEiOV4M0ua5iw zESmpMSu!}}%ofh}rxW#U?A9cEcpvtKUi1<;t{g^t9C=o4V^6yI{4X?Z@3WSx*|4a$ z5qcDPk?i7sY`(M}b_!GI$DTGAzICVMViy|E@AhRpV<|iP3|h zXuwMx+t+y9tXLFAVfBk(PKmP&x0Kz+*47ecpuke?8GMUXPM_kFve#3N-d< z)6^fuP^cM?XvG-) z#cy?}*ein02WpV{<|}v|Y%X-&`U1~s(?qBB>yi9Um!4<#Li`L3;U~{S?lqo({eBTR z^!ETuJE(^zek$Uh%yL|RuggC7uf?7s8=5%b4~&mkQc`;~nYH=Rq-9=W%dK~?t<|6a z`Gf4VY6>!z*pYR(Jk>a;QKP;b9a=DgjJ#}kcWofO6H3ug=!VGt$DuujXl=2`bRW)P z5&zC2arbx3IoXcH!dq}#{ff;{52oJDj^3Aw?O_`I5^qXAanD9KG%riQ`I(=QaWe<6 z!mKFqaW9g7>xXe8ZE5o61=O|5j;bW1us!M^`aJAI*S0J}cBHGIHGT{2U2jMJzw5c{ zbsNkaE8x9JTio-G`^xM0V_oi2-d`_4bVxq(hW4b^I3)_^-Y0FX95&*S66us{QSzmO z7^tQXBMW89;n~<%ZCXOw%U^7)?0B*>R^=SkiMVrb8yk9eiFk3ucWKRx>8$NtCX&63 zndXbr_{n$F$(bA3`-XRH*7ZR88`_PG`F<$w;s|y)aRk*U%oc|ZA)Q8`0U> z4l8r+qrx zMn3ueGRr^%hCwhaU9Xl<$luju9=~)=_z{ z&(PiY9%?Ng+2(EPg0+tper8?9K|2*X)?+B|8ii9p@;d4{T#-0rA0y-x#Oyoq*yWil zwhJN5;-m`|DcZ5u2ggy_zHP|-x`pQTa)sfG5p4KB?#ogv7d(%>z!+y4sbBA#{QKd> z^SNhnHEkd*aM(*tmtx6p{t?kWDG)o0Rmf)h3C=!DL*>UIB;WZI&7X`(J*^eYCyLr7 z378x1NO8+T@N}jMJgUZH?i|iQUfUN2cjWPQ&H*g);9lM-mb7>59<0pMV@5ZRu>GDJ z*tNW#Fx+EGXZkD9xh*f*o9sQXv6H8JVkZ7maHbtoRq5FR2?F>tA|pi7>?a;XFP7rV z_A|(owWdBi2b2~F5=yrbjQE}Hc?Slc-V256+;vyjDEM@nM%7CBP~?4vjqRr~I`9q- zoYo|RQ|$=teFaL6>U>u&Pc3em+&;x#taZN9hFggB!F_?s}gU_ zqxs}k7O+Q+T7DJ_{X9G0!6H{MIq0QWb^DnkHWvR&3`zebNt=JUP#Zv)Z%!b)0L{tzY~xC1ys5ndHDgruJX@oC31bm_@ahQU~T>9>sb zDQ~8EJJd)S`IsSf5u2XI;dH?o@%h+OEahH*YSk}el|eHod3ie2-fSgF*I2}dPmy%3 
zEEdA8YXq+y@9=d)oG_#6JYMp%Xi?@Rb^eg@KAax?7xN44xA?9)bra0^S>|ZoI{cY>iCHB&p&@JxEtPF%^G)PX+jAHE z>c65Y+L^9j&xP!a-ORjT5&LDxT}oAN*vU_Jbg1hYz9^qz?jKXpQ1BJ|-tNM{>%FPw zj{!BehSR;efiym1J*~>LqGIiF(9X?+nI-R5@At*Sehy^rwTZfo>`#&BxD&%77*C2X z^4+MeD01(^Ov~dCe??KkgU7tveF3-jtCQv|H5xmG=Vv}|VGo0~>Cp^Dy4t|chd7Ps||s$;uSZ{gv{gYb2G0uPdoW*QNnfZgU+2?s}8i zkqnZxh^CiA%f!%JXWqZ!4!V*YjHo*WZRbGRl>7zfxPNNI;7;t_wUE|khoOX7Q~11C zeGbIgWKrpOkp65(y#l&aWKlzc=YuF|zX1S3~%x{FBnvZ934Qx-$ z60t~qTlvCYWzyTuoQ*$hC8r)1!`auI;@U?s<+7?`bTcUIybJBPn29A_cI3sq>=#p`U|h?c_O$`nz0HCel^gKc{w~&@t3X}qN8Z1C zhOHsLg<%mMbT0jeaCy}rW*1e8&z1(T{%uLQ%iL)8={l@2GvmIoWCWc!0HYccDqZZx z^Vgf$VfUA;!kuSPcs}a44bKYO5zSj*j85fpd~vhHai`uCta2K@g|X0l^I6<-DF})^ zdQnNL9KF2qK=?FAg`P&(h{H;5V*CCu?lttM9}&Yz`-7fP93@L@|ESTW;qu=5-4)4T z>`%Ve9VfQcN7Lg`KcJBF7VgtE1`S)5THa;gjfZImF?@D+k_)%S;i0p6kA4>4Y01*^ z?l)j}C8}KI(OYPx?G&4=hC&i+LB81>S$;EA#KhZB>jVaXQZ zwftB32VRsulDmzC22OM)SBcK@8Or$~yQpXWEE4Q<#8WBTU>NlW4U={uv-uQG^8DVE zZhR-xszL1^HOZ!XB*iBz#^O~TG`(dVYIZB3xBq-Z1`MIecBk2d_w~YE{Wzp9YlQ!0 zJ$gUoEX=OtFpWp$?2G$UX1Q+->=(bqDivcoFwKTl-rf!6YMw6*x`*35i&XE|fpTw4 zjLy?1%`elby@K;yf6rQ(;empaxyQMsL;=T+6WKZKrV5d~i zKA2LHaxlwx6S>e02qDjSAoLro|7j5WR1NPI4Q$+MO4eRIY3{}-?m!>M^3AR2?4Bxo zb<33uY*~Qk{|%sBDnBs1a}UPHRIcCH;iiai`d)?f;mAE9Yqp98HIMN|E``jAVAm zQ-HD_)~4HGXYE-bx7mU2_RT`V4<*j~9zn_v`HU{plQOGh#ck8KV@KRT`r%%LG0%Da zBJ2c`X%V8$oycuYEuNabfjH+Ozw%TJa>7=?YV}EL*CfE z#E4{`+EMAn`&eDnj|y~ohbKH6eK;rAajrMnuLz|+kE@vf;JrxDHl-@lF;w2n$aIn_ zHp_}AT;>7x%bH%xRN)x!R{0lx7VA@&V^OU-X-DzCe(`pp-z0frhl54GoIN{?LC6Bc!t>T;Xee<=bTQTm2`7T9D=qpvYNaDE5E*C zWrsX4cI6N{rdq`&eIFxq-?kAsDj)Hz+MG^16~VWmi}?qRXFqr*eu`TtT6=WjGoQVh zIlPhV+_M>D)81i}aWO6r(xdG&)OhiE4$T{DL*xACQQxI~Xhpz0^!<{6+$C-BmRpV< z53Q+pc!a%L%yzLo#crB7zv_7Iw^*=1T@Q~Sk2_Vg` zIB9N~GXmEYG8;AnADX!%ASn&kew)!XKKod|eJC4cyAi$W+N6p<2har zNN3km)C~0}X9;&e(tfy#eaLuTE>2AxfSMsCQ0A=kJE{ z4eitDO@DJJJCtFIGw4w60@{+wSxvp` zu(M}}B)RSjbWR-=!%q-Yue;K;5kJ_Vc2m5~_r<7rUYOKN3s!g5Gp#RK?9${2;pp)i zl;2K~hR$rjt{(m9iM|reH`1fc8zj@Q;h>ZLS~gl{wJx`^o6>(glgM1oz2 
zRp5$}2wVC*>?xLwxrtZ)4={W0M|=-?3h(*#LREwtiP|p&+vystacBt~V^v^r!;%aK zy3&`ykFm+yg5JfYp%3p-_L4WItwWvZi$MU3oUG2X>!+~tP7jhf;V7KhIfPce)Z)(J zGMFtJfRhvZ(8gJ(apTJlT>A4q}RUuJO)P zcY-hHQ#fR35Do5+ktLy&wK|;qs%7a`)ir1v)q86kZ^5d+o5V9AA$)#lLF|_S;sYmB zlb0Wy{1#D&T^Bp&c23f@Yzj4P4;BubXu-pTQ_?ZQUEI#KBgsw`S~Xmko@yo08?D*= zTwfqcx9x=6s9!i)y%`@C9>avgqiEmbKN!`{xubn_iLHsCZ7OS^RpLR0WiwDZQ4Zgg zBM=!eglDC-GfgWfR7|Goc=a z6InmwO>mjc89mh(@lmj&?X}$~L)#Uz))>;gyxFwG%!Q&;GGKpBimqry>fgQ=Iy&A| z+IK0bt}~?Rf;t_GT8S-xGjK(_o3P{RBpRS~3`x)9sNlu}Sa81UE>VW(OS_YO68~P_ z{mM2g8dLO2J?dY!hE!K>W^>cbXy2y_^w@7N88$W?BaPhXv)NA+x}?IzwS+YXYSETa z6ND;_X307}`}*HGYuTL>C{wYexjRc)&An-K=EVf6i1~_=ek!DKY@4^d?=Vt)xJ?K- zkj-Am-e3jAc2IpijwUJI#PbK%bm@R9_v;#SXSWV!&N?A1JmWwPFEVju@?T`{^&#O1 z=SFsNZnBM*c)lVY_eMHXu`Fj*C2%(FgQIXN34&*?8@+3M3Tbsc9xtzhk;Yvpes~Jc z7I|@P(msD)^C$QIB(QDcOqJe|u2x zkPvG7p}@P}S$O5!hi3B5`AhEgxR?BiscyQA+9dASNN^yP?l)m`auckBzKVNYX5jn? zHIm8Zd*O&F!q!$53RL$K3-d0(L~#pDDS(=7LB72ch0kZXlO|e~G>7(OkK=mKF8)0z zHs~#OZ(c?3>^{Slcf%5|Wl67&uwnOFhGV&15thDFqkrSnFgjo%&5oZ*)S*JjvS)Eg zX#fjr<=)pnkz)AW@u=mzSi|-wtVcsIJ^m608~4Q|4$;E$PuILJoy?MSi(f6sw*Nxj zr^A91e=m;7w4tH@{^0X64LWJIokl$kBhS<2;_0L%crBwyi&Q!1=TQZQB>K~`KHo7z z^Dlm9=+mq}Q52WI1yX0wb$-q|8h4+0r+XmlC{fw)0#>r

)*OElAOC#W~T87Hl|+ z6MZ_^S%XBjddvtq;=de59Ua(SqD&#Tj!Kj^t-@2k*O=gzk4MvWNNyKrZ>7#66V7)1 z8a|IU9`8$Uey+gWp1Yx1&AVJ0o1m|2L)I;;c+ZHtcAv@9?YD-|w>tzSg)G5tyf3Z% zT8#G>me9g@5hr*CqDWnV>?idgYo}v~C3W5hvZSL`yz@QN3tlY|WWrfDUg~A&W;#~N)upUfA&}m6o|Wd$wZIgO|Cp18H&!{0rO^2YVBl^={)XIhS-*`9 z4vI!qX`Eo-??S!_<4_(WM?U6$bmPQr9M&F1Wt>f3(>DVm=kI0rD}vS|Z%iw=z`H1Z zm=V;6j@W#}<()NH+e1XJ8*L~%R*vq{zGBBNcaoWFMzS-%dM|!bgrNgZutFsxs_i?7 zY@$UPT=vb!pEKj= z+xf{9oSiMqc-eud)-HIgQy2V~tJ1_nvh<^Hf#{IBl4p-)XRC_C^ zZ-~QxHk=jn-5TBXr&0R3`BZGl^I*%K;O$RmmRa%x_4oFRdk+l4$}P_HDpHekAr*0D z4QGt+w??q;cNP|r%kr1)VG$Q*3V|MX5gn>5%$i&a2Rmc#66SYDsS)MGB~g^jQrek* zMEvFu4TbiXST{BX>(tJo#vz3KGdP3VK#P31o9oPX?xb76@b`@~InUXQ;t}>3IwuA* z9`XLc9U1n>_?BSJ_w&EC-eTA&erA%ng1KjYNVb2RO1CYpvb*gGXfN!**g3!9KKmM* zXng=Tem_Rf)Li5`+Ebj09&PB}#18e*qLOW4^m$-^`tdZ3JBN1Sz!M7^JbxP!WhV%u z^`dFZ-#+xU@C~+=M#FsKMV=9s6P4$Tr+T#%Tq{^d>HqSvCb$C85?xvv%AJ0J*Pz>x z&Ftb;X#KUGRNS6}4V-V1$N7hmp+BK`xKNmQa4ro08%5GRibN4Jp|&HAbu}*#jda$O zD+bPCneInmX{aqZ?3E3tatn%>AZ7CIN>HpCO)W2fb9R;yont##e%27$=C@Up9eM^^ z94aL1zP)9aw@e_F4@p?K$d)e8kfWw+KUv1XSY+h)6Jiqe>8{&CSiYC1y$wU?=#M6R zt+FC%Z_roTjRB{|^WUi&uI6cw?asXw)e5Ma1*&=S4fi`M5I*NPbRCW$Pg|A_d#(}{ zrkav;zOQ)MP#$-W-9}l0AwIscqsiG$bjRx!ra#do)4BP0&N)vF`r14*U{C0Go~=Co zl6C)h3C-LIrKcJ${4EJ04F?&h+dsiN!$Xpv`P?CVD2r$860kW>u44J>b!d{)p%F#u z^d;q)@V8Np8a5k=D|D{FAX*C}`0Qugs6cx9_^Dt}|BO% zqtapEdO(~q*?@KMyhpDKo7q|Qk@RfoE@<7@Kuwlrd@gxX5~w91yVwFDZt8tBZa6AE zw&MXFob@DY$0}sy^rh7uJIQ6lDl#ZOAifR9K$G=X5F9Ki`M%fA6F2wl-Z% z`Hbedi^%0n8hV_vq^G7^Fi7SX=U=YFX*P*s`&nbsx_fLl=Wd>uE>AOt_ov4n5-?28 zfqkBLo5ikr!Yp2@;%}r8i&a_*>lL;Qljb+3KYDz7Cj==@lRQUMuqCopmW>l;8b~7aPIs8ZzUS=JxDN* z3C5K90ThwKJtLQQBf2<%>5f*T^9p#I zF9jU$#H4EA_(Q>FYeQxU|R(g~&^^PXqnQ*`oCv&vxpq>*3;>HeLC2O@2#u$3ZI@Pqt$DT_$n}w#q4yZLug^TZ9#P1B?oP@ z)>FMcLuSKPX`)@a;AePRP|0qBzICE752a9Z@Fn*rHK?2JNaxLWQ2o`lRD1J~cpRHy zANvnKG1!twFzNjGsHj;cFR_>n{iwU?Qqsvjm5jixn| zJ~I8|pIGMb;~4U%3v8@A&6mkSkzXoPve?3k&+r*;<6TzLVoeE`KEi5e3fsV4uDMwa z7_oXUSdYH6=7knL85Kh9y-dhCIf^uk>?v&FJjCtWi@`t4X}nu7DysVbA4lgMmgD<| 
z@sK9%rJ=MXQd&~)eO^&Y$w-Bg8OcbpM?^9rWkt#sB1IWd@AFiY5m|`_m6424$o9Lx zzkeM^4u{_NdG7nV&hzu3j)qujwlSc;i`tQ8&0y#1*I*E4!$FlN^I+V6foyOUu!Q|g68k)s9vtIAf9czV|H@8BmS1=`A z=6REyLD*SB>}s-;m>HNLx%y-g>ngty?j0)3+LE($3#}+?=jHlEl70%E05EE2T?Q z^-1|ze{7o~PrW(2C*PqKl_GZ#MM$XfUpOA058yuSdfYk}jn39$q^PN5=Cy&OKKBQX z@;TkbJNwWpA_qpBUvpMMq*#{Ik2Yp0)67dXY;%`#M13z~(HX|{bgc_*TV4+#)|k!) z9K+B%H@W|bv*?$YQo@zVyc2(g{T=WG-%fU?)NOl(7b84bzekIy$KPtK{F5&2Gi)Gb znr}wa*c}*K)Gp=~t;JxT%}e3?vcWSg#ezm9s-9&kN*CATn7bvKL&wsThVlG)rz|#q zc#Qp)zfhMdBdA?{32z^D@_4&dyt8OAeXDAP({}C*`rMuSY=(2z$pjdcXJOSFd1@WH z6x|PU-|fsL^g5vj1#*wq$5rh`8E%JAZj>%QbIxFPQg=$|`bW|>{<*}cWIhyQJn;UP zFEm%!v-v|_dX#JblUiLYfs5Tr>63;#m~+R5%qsZoAjp!Emh7OdKj%@$r}N^iG$1mX zXA`5jL!$RV+;}va-Y7I7;)xF3r0!((o%gCTW8mJ!j9x14M0Cg?Xg9d z|Hpjf@8VkJM`%v&PZtmEMz`pA>59zBbniMbG0XzqnFjPE?>F~7?qk_!cf*deeo|}B zAkbG3UtXE3pesu8oc)#G$afMm(y3z)NApC)M>7PGaLXi~*g zF)ZB}S(WvO@-l$@LqoD{;JvSY)sP*dOyy2jaK_>s2H&uz@C`l4V@IoM$h03A;tmOMjwfX!xcGQf5JB~rYPmY~%ZIPChcE=x^ zN$8nnPOEq(!9y~O9Wyxq*{{5x-gFLfN}hCf1Mh|23S|MDjcOSaMdwC&(c$i+vF`RM zTvT_a)UgRzb$t>sn`F|c4W{7l`hbwnpR}I?AS(|@gJ~Kkp4l;no>khPpcp)H4o2;Vrd1v_sWY_cjZD27hWBB{0 zuO01K#yfxgT3GjwB+WUa=v8to+4stjHjXL8zZ=IS{X48^V&FyC&KZTQ z!9T>D%JJxNT!l72>qWVR=HkPxRoM7ullV=&0w%L&W7vpcw0o=@^)RXx<|NeMK!0^| z7X}JWe7|Y&gTHsD%7`mAFQZ=kP7+#Jjfzov$oXO*)b7*dewa59mhp~2>_AK&uz)5% z+rXLZ-RM#99elM~CYoPdgcFrJ#HTfP*uJYC6s?=hCN7*pFC!13p>iuR9XGg3cSfHp2dU!}&wkr^eFK{>C*=^)? 
zH3tIU$x43;cr;&=dQbd}quUcHk^-T(tt;((5{yGXF0$0LKG=1hGt;h_Vc{`d{2X;0 z4RT6!Zn6Wde9Kt@-Ll!8dt*F4-~&_8@FEge{dlflKPAF4pNWYp;U`8`h!`8aaDxJZ=c93RW$)htNGl8xIFLS2sR zL1!Ncb&FOY)xAI1xh`8#arv24<`CaeMEhZrp(f2z@S|Py8pm^g!sKHB<#_5NW_bv` zJX?kjeoJt$S;VCKJ)mIbM4PKykknPg{y8lkGx&3{t7A_xc1RYLd52rARgG$X-(aIZ z@%P4iUHJ2z-_H-$w12`^m|s#M{Zr>*KY%l%>n-TE?_ZqHH6yh?f29e&39ufiLnRfx z1kXLPXxQpc>KmJ}>bJ7wQ4YVScW!}&1FLBVO#81pUnm*tcyvgruV19((_Fr)93$LssToz?SiYs5$={ zf+`(I&o`6$SjJP#zYH-}btE1>bKqRd97sZo;W&OMt!k~raW73;5^^0IcQ2uA&-nF@5l0ISh;mLy_lwfi|e`bY*#TP^_(A>!MSQtCorL`SlUN}cb}eELQQoQ zuD07zST_x_{&b!lsPTo3v;yvR5BPc7hD`c@g!`2Q7Ou+sbetPt%5&W-PV7e8nDdyl zuM?%;(~%SMRqz`Y!go)+i+K43HUxUW`|&!2&pO4T27}ZU_d@M$EU6yLM=D-p6uMDw zrbBxV<-e`cit*r|#dVtG`$CqYj`o4|q-ZRDI8Yj0Dxm|3u{aU) z0eTAqsjKr%SoN?c&%+LsmCl*Y!3Na)IEUx{BauDsIJ7nPLa*3_Ox7PopI()icI`7l ze@YQ27mPXo_6yoChfu|^ufn=>s(e;o0IOB%Xl*c{H%Hw`QIESlOHAno?^16Z=7#Is z3)W+r8(CH(AYZY^p-@)#es zn!BfS3uLKUR}lV=kD_CeCy02gM9eW%s+ciT7`N9Kv!p8_3F}P@cFe*92V2UC4db1H zE@a#B8hux|3z{G5P`PcqxKTL@_4{~+rSAZUgF;E{p-zIEF0BWao^h@vQZ4 zp|*z4rcXZb(43dc-P3(}zM(tW=jl_EQz|W6GlFL1EEU7FDslIg8a>s$$2taGM6I(u z1$gkC{ADdljqggc>VoLW+p#$I+MN1q--Q9gdLi5J1S&N=Nk>#flJkM0-6Q6JCO-h% zsz*-$-lKZ2F1{zbvu%6Sq=}rP9Li^+Jz90hYQ!KGV7>s&{#Dq=-XrJmAo_LrJuE){ zW0pLhyy3)1I%&^6ZC&@{^ZX<5k#E2RmtFAG9YBuV0{C51mBt#s!#=mASa_6k3!}cX z@BSlbh29pt$Xm_xEZomDT!wabX+wa8F6ll#4Bdlo*{nD-3cqVkU5n#LwKblFPP3-p z*>`w9g`^F&+hNH2KXNJGkw1AYCYKC^9`)g~!XnoAZvL&P5lm1_fzpp}Xl!PL)HfQ?m6^0iFzOf1V>!6=H8JDd`QaR_oTz6BVO~XE7 z(smopavT9~^D%WM^c3D7RyU?m*LUE?^kBSw zcnVf~F5tH<-;vDW9gTo`3^r^=j~4E!4K~5Xqh*5U{lWCQJYJ|G3$&UPAaI^9^gnCS zAul`HUveBc)0b3#q+p5=4kiBoIevpX&6yrXW7YE{-6F?AXGafuRy~S71P$hW0UiQU zdW6P4GjYR1kM>D-VGKMmr~aRC^!F5GAL~XNL$&F3^)KOTbayIIvJ=}|ZgCL(MFc$X zp>s0sq&w!Ba3r%09^6$~|3Qa2w$#HZ=o20(92Q~}7V>Ue75;A0Bm4R4!ri?+grcjK z&^WUj_Z-zIvLOt~b(XZbY&vbSQ=peTzjUeKVzK}HJLtQ_NnEyZIRY!4$;ZDRF51i> zi=E35tvQ=CO!lEqqy&nW?YFlO*LTz4 z2omF~Z$o*b9)$&jlg^$(9O4;7cCRZHT4~eu53itVHH~a1+QE7gKifN{LryGZiJ5#? 
zzs!%S-|@3sha;1EdctDsHCT9>QTfkTSZfu8H|=AYeyJ-Ng`}}X1@|%ifGQm`Ph>$7 zbv%B<|4;Z%F*wYErcC|}Pt`!W#k22jJALSBp(YhqMk90MMFcY#RzMLPjN>N;N(8@!` zR9kQw-knV7A{+WV6Tf2rU~8YZZ1Ll4)~%ZRz5QDujOapPtvlJ@u0$tu)DbqXot<)% zpl9|%M89&Si%uobD>9((jWTp{@-tTG$L|=Q^o9NdB;=(Kir*6+BUdquj%<651A9Ga zrNbclxp)&sm}}C8n^~Az!E;bT3cOG4!;6iUw6x$7Os8FdXJsRX+_{6GervI=vQ*f0 z(2mcv`8>_(EGz9=is4(;aq%htJ2E{f#8SlG1}zE?&%x{C0T>jmN4+;WkThy6ts8rS z8LwQ4nCV|o(tfpQ?Y&#lx*03T#f;CZ%68+`1Xa3SbCTySH(*fR4#7EiAvj}#)*b9g zJ(YaL7oW7qFWN^O`Mnb-J1@c0bnAu;jl;)_)uG2_Vx9MrC3zF(xAdnZdA=q(;< zm_wO*oji-7NK+S;6rT>UrHqlbcw96K)1O%o=RqJpr$22Q~szr+fbYtW9(Jg}~D8B=M4? zH!1RI${sfUjWT;-+%v2@2QSqr2JLi{k0ILDLvQws#^gqn;y(XVyOrwQ$ zW~9N-WPfe;;UM>aRU5}+Vc~EgSamw3?boL#-2Hs_@p6=pD}=mbfiP5N5~aP_gK0st zNjP;C(xZiFJETpATNFv+atSP|p0UMhw0xK`ecixtNyi*UWI%>xzwu`E2%*}09yC@5 zP-4GtoQpph-TM4yKfkRK{T@${*xm?YIs*?w>1JYadT=;Lg2tQo%dA4)xX z{6yhOW!kX3PGa-Lod#wFii@L9U{^>SOMUW$$;%Fb>}`@rTO<8qWEcQ^q)w<^BFNV8>qQe&av;o^!jElTnF#@CvrYhHFD; z>-iSsnP0)+Nyl+}VL1wfkC+o75!F06?_JAI43Vp5TX`R-@t-NGxG%>(--)!I-oTWD zYSgy*9L90R+L$xev@pzu_PCvt-Zt+JIhT95b(Z%f!m6eFM}<2>~Kf1Uwfclka zh>>~U(7=1VyF2QogEG3%zr_l)efuOaZuN31k$Z~{ch2>^JCTie5H8*REQIf}%b>l0 z=LFmBk#m0

H?XNMZeMO;TrLWR`v$6lOz5hME79F>d}oM`5~MD}4=AT>3P#FF53 zbX4~z>vzbKPrPnU3wiWQjDn&@toJCVtoWT^InN;$J?}IYAvrJcshR#-`^FDX5o;$jC_t&TS z`iAuCT0FnY{boVo26X)q&-~3!5oT!&N6Kd}%1ZczsPjpvRsOqJI2*UUxh{wHzi??Fk_ z+*NG%ZGRdP#(jX^E)=QSg|>U?;l(T;-f1k9uHC0c%Idpd(4`wa?>d95Z@{zPBXvLTlQ+qZlg#49Q`34i}8@!4^qSljOsRyjvv2A_+h<`5a3cqiPnZAFE>GTn`TCHZczz|WNOblc8|3kp|~ zLDM_j<(!>My=O?)SDlmIN!*BxO%Jf9u^Ww8Wr>0vF?36LA$8s~pkLF^qd+xSDqWj^ z$%DPctrw@UdED#j(p<%qswdDWE$;9g7Dol2O_2L%uSCTlTyVHmDcp&uN08+KAus3# zX0LH2O_v8qzG6)e_14jzW9#WedWkrza~3L(wPF77-E@ou#MTa@8IK;qeBcjs7_?!M z*Lu1ycNosKLuj-Z51aRw*|)coVCLXOChgj2cJ9Yo)iW?ZNrqlucA!-rJJId^9hUNN zJ?Dr7kXKXgS|sW7 zeeb}EJT$nd(k2r{k~^{wOQtVkW374DcCS9IcQL_=v-9ZegPwH#T_@7NWeVLE^IXfI zadey%No_WF6CB#aj%lwGN1y&Bd7<=Al6Y$!%B=0BYX0RgPZ>-j6V6Etcc{RxVKn70 z{(y)MCHl>uLxGmQRGGCx43-E;De`0AcN$>K7ykJ_xeGZ9Y-oAw7ZfZ^WiIs_k^j?5 zc$9BL!wqKQ^%xa0852aeYg-UL(VEWw;WL4Y@hHD8p%Om7%P)+;E%`EhuQ5R6BU=ic z-G)e>@z@-^5e~O2aH_l=;RKdOJYj2&n$n!F?sPBlDc%%l z(DwTWp^(R!v-d5jt<8`w*ZR`A>s|29^CMQ+sFC^Dxq^>l33JU@M)n@p@$XR(6Xuz6 zzQs&jirbF|Pv443S27V3WKOzU<>`~Rp=dv|D~&6eCe8_dfP`KaXb%da@`&-Y_?@+= zHoOrl$H~&qbQPg{>j%z$&?e8s>0($$4DC1VKvIef=?B)bfRla{WHbpq-kd~CHwDr> zI1e{lr_$P6i%9dj1{KFWLyq%FiTZ}KNH{l3R7ji4h8uy-j;eni}!yK@V~ne3R{*KL$GLwXo*1 z-$(rZzvJXr_+Ch5BfYgKvV001tT79Esiz8hEkAJ^S{*j7B5Fo%90xQ_GgoTE^xK_9qtMR{Nu?!CFi1}QsJ#V0E| z9O@6wGiS%@dr_1c=SX)h5&B$9L6oI3dfTKfFhK zN*|#}IS-c?YE#fGCAQk}8hdOzlFEjBg=(}s_4m2N?pA^<7HT5Srj{)!OvIMaQP6YX zT+e>DF=DhP#h&U)Hudf7hkPKurIkn$dC$uC**x^BevD;JL3HIu9=y1RK>O@KnmOSN zR*f*C8y06VWs5hwUSuJb_oe^iV1$|1!A20!lKBqjaSRQ+;_z9sT!^Z5raMo|1QYHp z`|oZQy5=qG``LzKg4}6!K^0~+n$qOb3#c(1i`MDrX{i7|&dN~3o+;@r-&iX<(K%2fBY15PiZNgmcLap!F zPdqrO67Gt}aqm|sEp789e^x8x?2;j)KshSDtH^U-HP|BCj!j48#H14ubfWYn^gUl; z>DnOzz1}7)Zyb!(6H;(*aX)Iw7=quNo&RZ8INjXMdzK1yI3*n|SjyBRXT?TwSb8{; z=3CITPR>vA9Y+tREPzheCFCi;8tc26uq4|#l)7qwIH#%|!EXJ88uha8$^5nbc)ET992 zKaQaz6Z3Fq<`DYPQ(HVZD+O~0nDM=GK4dDmpIH4Qn0*m0`CHN={g?1)sl}e?7TlkC z0N!thA}d=%Ou0COu9q$sHnhvI#&^|N6=e*!878z`+n#Lu-NavQTblj22oHFk)=b`# 
zY?rvxmgq?oYkrhn_2Hh21-ewe(VGtgPq~}3m|Et2b*iTrL_>+To99iDd zkC0scjPzwK!uDs2NX@Jc*#ZA>PBvZ|ZaGc(t2+QIZF7;CW=ytgbYW#MgrpCql9cC? z6!nWyXzV0paL3Fa!-ZnrE1IYrt#LIQr(l8)sw|kcstlblx>(?}p~FR{`o` zdb|v+Rmm4-xM$;v??4*b(vBG3q3D{)yYdCYsDE&vxNqE9IPI%NTz&%Yv0mg{HjsV) zcW5l*tXCZcy7F|+|M{ZD1FfiJ)GquTsfhrGTeu!HiuxK4gYo<_mTj1b)3Ie3%=aQo zmitS`C$NfjZq#(`DVurt8_Eo?uL>V$P|TFz~$dEgIj%cU_>rjX}j4RSW^LPeQ|Fq^jn-XHV@XAa_e zRB{4cuFs@7MNMcu^9KEUE0UtQDXsZ_2J4eXvaxWY1ILVM_wha`yc$BnQ*~<3tU~oB zQP>=o0qHA$I@Zq5qIO$gesC_UOz$eDb`F#Nh_v@;UicY5G_MG=tMYNF!-SlCWToF% ztYHaV#&f4{3yROQ^PK}{?f)G{m0{g+Fwq>7Y*#}@bu+38eCgPe>$tqE7j51oLs83D zvE?os;C7~4QE8SNeQ6npA?s>kbALPq6-x2Iek4VF9Z0ABPC>|3A)`E=J3gj|zkB)q zH+~zC!F$Bs+?_D{0!-IGLA&Es$m_4ii!PUi8F@sBz8X}y-Fcr&5_mnBy)J3Ww& zmyIU7M18SQ=_9i0%&UrZq_p`16OT+I=>i8ly@ZTRtLHyigo7yaDT^8RC>li_kR3kV^kGvXWJb5~IIS z+>19I?>>)1)pWkMx$c1p>w1VT-D**~=aqEs#G6<#&zu}*ai7JkeuS@ENt_r)e_b|+ z%cic!M~5eP`!@s)dy?^ShBL+8QY0@Q?%mK)r4EIuG|tNhyA<82-0%W+eE-XKJ;*|7 zxG!DXUc=|Q>zSne5FBf(aQL|@4a>{L>Y-gx{AD^lI#A+qdbk8z7yrbiLKV_De}N@s zZ^oyV8qV~o#5#V*mK1)&-QXscU}i;+a;K3>ya|oTOT>eMOuXi-8|m_kSni=Bo-yXW zf!ikZGeDlyazpU!x`^4>z^g;!wQ~7A5e!4rMl9RFN@ITL}dI@j`DcVO*?fT`!Rf^7~FfM#AmBF zn|JvRF3#C5$REx}da5do_}!bm(=KKEFN`G1UY`-s{0{ojH;UWZhSQzfH<&Ua+iUId*X~pbsIJqMmYS(Wf&T2p2DQ!gCkz2yg z)$U{+{8~`zv5T#5=MJh$9e4$qkV`6~K^3=gf4nt?rswgy*K|yM+lPAP521w%QdxJS zFsAW&IVKkMAj2w8=4azUDOV1&#^qO$HeexY3i^;=YX-i>j>pL*dqw@L-tgvo@A^K9 zboH2pm_JpOn*5x_%oC4LHDDjo3WMmtenumYGzd#abYg*m43(Prv!QNZu;#;84DIP4 zE?T{aQl7lPu(vJ9%{V9&A385A4q&KSa0WUuCgk69Af~(?Ne?t8QdjPXi~98xi|1XH znkCm^+RGHNHfAKcUNNH&1OGA4oq=Snvl%)n@*s`Kw6XXpj~+(tT2$Iv9{Z1EY=k#|Lb5;|t0Y5iWr@LrtZxlg$2piJv7 zeZ!UN88j)!5jo%NNxM27GFSVdpg9e?aw8}vOab=BuFPZVX8gzIjh3p@M+1>}9Xr^U z?QhsQIcGt;qZ?LP{YJC_-|JQGXZvRI{>7vQBm_T3O%Tt#t$l`*BlNIpg*jQBpFk%D z4WPM06ES%~Ax?S!#UgJhs#o--=4!h4Y25W>Bh&Qg_o&Aq87=C++5PYWs%dTls&$Ls_F*209@A&=n z!3Qj@kTaB!#8Cd%w5SQdW3Lj1}lK&Vb^vW}3tLDE!GS4Y2U#(9EQXT2NPa!;a zSkow;xiQ)nj~F#8s%VtZit3s4CnKHB*fJHf2kTQtOfacs4WteJj%><_L%6zaJ-*9Z 
zldVxIOtu;$;`UbY$@dVbR>;%*d=*l1{wlN-D^b#r38HqI6zi?dV&?r2l2LY|5|?CQ z|KC>hwwI-*Z4oSPA@3dz>ww}J-j`KeLRw#H;LZ4VW8fI!PSh5mson}rDdqovPqnqn z^Zda$&S(gw4V-V}`mP9mO;OUsiDkSCx{BXf!=QD{fx_;{{ok>9SUrpTj@HrCJpp*R z@u6gU`FwVxMOCZ{<-W+)OM;0Dx0Bcrxd(k9{*;&3mnKXDis==B5>V56?bz+(r!6xC`MGU z*(EEW>|BB71>7s$s}ypzL-1~AE%Ub-NPqseu#KE?FlduJZM&(@Cb)9{TF86!yHJ3c zymLNv_D5`U3ZdJ#?dZ?y`DD7xl!hOi0h1ovacoKlN~av>oGdL`?me9z^fRL#Ir6l$ zOK)__5 z+%j$yo$o78J4~}d#?qjDd`io_+^n_Tq`I2`$JJ$PF zsdU!4GkCw!kg63Pd8AewvJa)>X!WMAP&(R-vo3!n-`d7f(6R$;XH9>^4OxaPv-wy# zU^1;r$b|qyk}i~|zE!=L{pronSi45b= zu~&-xJID9#K0UQ*SjBJ}W>QfwT*o9%y!|7b^Fw$SHE6QGJ#)_z4==P?8rB(C1&uKZ{Z}SkJmCdHq$1*(g z{F2XnV}u>2`p~HeKm1&HhVuvdQ(&Vi)Z<2z?TXoyznMD z)X~x&-KWaZn^;vkdor6fxF?`0vjz=%Rd}b|gJxg;f_18oS@db{!dT1qU%gByY-bWI zM;?Y=c1uY~>8CG=PG2*yS| z!?!q9Qg-P@?L6zdSm`+%R&PfgW6Y@|ZzIBm5iG7sfj-=9K;7o@VsGa~{QM4@lcGSi zGM*T`d^h|1Jy>*_q$GJ(=*q0R-N$vG4T6T|d8AxWqxZ(z%)a&p8_9VmOW%FKgsUHL z^0;~NlN2ACexQ^MuxV!TdnduV*GN=-45S$S^Ek;fn!Ve2pDyz-Q!fs~X7y8&?;5W3 zQ*|+>_jrsnw{etd%V%#fc2qZHAeB`f!)0|R3NgBZyQUH7$-jfRoP;&z{i$+i2VUo8 z;aB=?Oo_|HrcqlkVPm0C`oN7|xI7p7vvk(W>LHxkjgbD(lTYVE~l{{6cgp&)b{co{o~Sdnk40@=6sr<$qT=&ErbEk6@3s@Cqsv6`RT+G#F;qV#V+}7!`3R zsYfGb>@kAv3-kZG=7)73Kts#7!J4z`-p}kp+I>&sz*!qA@`|MYe*cBe-yuI>DR*WU zAli4I)VygF-5s(P=D}-8_sbTj)imSjKY41;(V#63H*s>XEN&%P(qOAzWUH1y2E9ix z#c#T_A-@_nAp_Y_y%6p{cBRRjdAexVV!RD$U{~_AsHk5A>pW}6qCfE+NV2yOp~d|U z(dtw%e-~@|6+$1~E?!$;GTq?@o? 
zXG30|sKPDnMwI6w<>_@;nbHw&c5^rRL<9HnBr@mIEC!ZRH>DQD=(JfTeuH%{c zJ^XuX7fhJpN-gatge85OFu~{tYL`c|M=KR5WOOTD4t^~Zja)*zyFP{W&VTS}jTPqA z%@gikw!uf^3iMIdqHl&4_#HN$I_oFXwPFoY*OTJ>xaA(Q?jqor#Lq0_5wGJ&$_k%Y z(SXrpS#$(lOIOq2^a-fx6Um-JaM7 zW?_)cHrV+1ljO4wm`X7_&p-bgb4yV1%z(3)Z{omvTX^;^V#Dv)Q}y_7Y}f?_lI44l zQ!fXyB@G+k_2V<@yGyacRGF^Pd&C$`q}hIhXcXt)#(p)SsK6O~x03{8m!EjF`vg8e z;$FmoQ%QHb8Fxv^Q}G&0yjpVtyo)EK>UdDz`BV(c;{X1Om!N;}Gv>~hBLhnVs`pC6 zk3Yj%*IN#>qq+~pxsAp5jpJy=5?SiC=m zspC`#r8i62xfY|8q3$F$sL!Q<@IM&So%`C769vA5rjBbq_-vGi8!K#Se2FX`I|b6IH4$W&ra@Du zwsEiRA<4B}A9y}!zqom3B7DtF=;o_8%p$I&C}#B*6eRA(-uwMAW?eGdC$EN(svcs| z;+xpeH(u&F<374S>`i?Scc*C&Z0J_Sb}FW+G|_ab_$4v{4r!hE_I@xzCnQ2&i{~nM zR%OgZB^tu>)5bj}P?*L5*xr|r_NXKjWow|^ekDF09zouxRD zLYkcmkW{jk{@aOVO5bD44MpWh9qOwcgPr~^!^EM|9Dya-_9ewSe51^$Flpn z^{lZdknWCaLt!1~c0P+|!Ihqra=Vk6oS(+h8rPxA!A01*z=yv079pn5m}b4_U9rVy zSdZ-t{YEQEzIeOQ!*cGG+j|?&qo>o@)HB!;?LsHbJ*f2XRoG7NN8Z|baM78A&E-?D z`&b5c?KY>-vL-C@xQv(sPcSCo4E&?lBUhRyj2PiW5t9wYQS!yC`tk*YH0hzZFX#0x z0Ns&Z#nE(4QVHifs$t&vepjDc#;{~Yw#@@O5q%(Es-9x0Nk-)`yZaYXd=Fv~i8 z7vCj|QM%lTEJ}`HlZ_vSr>_&sT|6-Olp!s=&(DzC%Y~u5b2(tHmv~V59>xh7c-cOQ zvMPP4L!&^Dt87B}*Uvb-oyX(XzQbatZ|GkaC~BOFp&gm8ka@Ze7ayuflOOICbmpyx zOqXovrJ9gde{0ltb*CqNXV71suf0}T1q&nRt6h&b;Yw|qIPCU#)Th~y%`qJWG>xUX zvwabKeJu$VV>sI@lex~FN@L!jpr{P?3hA+tLLNWjT7Cud6URPBO5;z4)D~Z!7XOv?*j+5kgmWM`>X*v-GND9 zuY~s0*GTBvfLEMLa(nS}ybZF(&w33?(hH)zJTq$l7Xw-SWZWNp2MV*VaVND6U0FAs z`mHqOv++;J;QY~E4|wO_&77?W8%jm#t1)M3B0YMt4~Nr#VCc}F*wM|H{wWsY@zrML zoo`M(I}K=w%3|`!&SvRdEy%2@4CA%lN#s+EA><9Fz!ibR$zoBcFR~`#imtyQ9H7fe?iY;vjrA$pP`r7v`wzPGp{a5NGj(3NXXV5^-R~W)} zDC|ODhBu*GXSI1li#MJV2fZo<%$XR&SF0NUs995pulo=&H<30F;gT}Uu4v%(mV?_ zs!sP8dgaJcvGz>yP{0MaasGDX{3*1#ol&;w7U9;Jo0vWMBmBpCFlQs~3ViVqf0r7F zjt>^mU-2R8)^tE)YPpA;;zZ6$4@Z+*876mXQ{5*E9CGp|ckKv@S*bxG^UtBu?uX=k z@NIMyZ4qM*$Ka0WN+lu6I591l?z*qxp5A5D6yt(@IzH^d{TO!hfw4G_@8|98p9$N$ z6k*36XIk37E45W|=iG@+v`a6Ljwo{eiR>LXeXd1kd;<09R*3bJhtfei?xq_hPdyS8 zDQMda`gz?2+4p${UnYxZYb^2P_8ELX;ZGs8mU!N!nzif1V*C6WXfNzTBgRPa)kA`l 
z>lGo(nbA{!bVrE#U#R_3px$mt5|fp-NL$qc-=h_na#DrrYQN$&&#W*-d+y#17k-|R(Bp4cY*oOA4vuKO4+%)4QvzVKuyd`#(^h+Wc*b`zhfq}PhOeu@ulSK(WMB` znJ0A`JA{4~`Jm^g>(Ct=Ny($jki49u&-92~*5|Y(Q^mgg82=0qxfL(3vuh)_)pJl@ZEf zQNlx%Sv|+VWg5(0>oN9h=e@g_K(V%UCdH-6P^QXz7>c6QWvK=2AHEbrYmY)WYD=?v zs32{;CnY&VQ^Y{+|L-;5shKi=_m7ynC0l%wkpT1NK2(2G0awm87jH5RN3F|x9DdXv zU#@aL-B=?e_EF(Q;YFwzow)3{g3P#<^{lJGBx9g>dR-i<=k`RR132j^~cAF1yM zPk2txf~x8esZ$_mu5<(Re=Q@Qb?5MG_**!-D$v=fY82G-G7KNAW5Ny1o z*dK%&-_>Y*)20)m3U*y3tY?Zss{3G)9UPBU;ZGsEC6v|#s2Q?ECt zC$~?aIhAvlD|h1c({1p+Y(`04UVxOdVAlQu%fhnp_FOVjZpw(7Kl!=!nXQ=N&<$r_ zmBDVNu*GiiXX136)iR-0KSlcP-Xtg_@>#>+;o^Zm zx8Ub@5`$h$qpK;NbUg5^aDQ&7fOY0LnO8Ug}-owNYw^bDVz6Esa1q@U? zq_4-G5iadqfVG@+^yqV6>P)c0sO`R#cz7D+=_}E@q6YpQIwf70`y93xQpA1H&KTRR z4@GKp$7cQ+);{pX>iO^X=&GK%)%)Vt5w!?iI<>mCbYM<&3dBZc}dv9$#U4HOZWMyVVf1-|| z!>X|SoW3rWeRQMybGqTxlY${e_1yJlRr(et!~HHB!PQ2+$D+9dxa#UEl(UT9WrHp* zxpop6+m0sPj89O${|BO&%btZ!W2kk25sW)xxZ8OfFmkI47R(8t|NNT4{|1r}-j~ii z8jg;eeXv{D#E&&~rk95@uzyDv;=HEOGuQL~|96IZ^zhBKhj290n$F~ulMJV3*jBdADT3Zd6ju#_D}7uSE`yTli{lTLNGd@CFp&f26j z-<~8(PNF}{ZoJ(qM(>L#=2kJMWjP}Hl{%irOuWSn4@gCoEpx?q6OHa3Na}xPbJLDq zgZKD2JUQ+`XEb)<^uR&*aCftCu6r(IF88O*SXJgm%;4WTO3;}%i-p83j7QK^ir#Z! 
z)cSo0oi#bcKdt|V`8mxP`fCHHaj6YE#cjw~BqK=giKU+d9^!4?Pi%GC;i=}kiH|)O zj+uiP7p;ipU;fD=tRaweFU+B-ll#$w3;{2``HA3L1Npsi!Y*SsmKk&;TzbcyXBpde znJhe4ThCltbMR~UDp5g$DOYH(ChXWNL)_hC{L1cIsH$*bo(2iZcx_8>47X5i!WgQ1 z6(G#x#V|DdjgPbAXu_CVjJeNIc-&`L*7e}R21WW2x`L*zF~P;-EK4Xl2#f80*uQW; zb~AUTwUr!||GH~X+N1zj2lZN~Tr*6xDZKufOd zlN(&;)WBg-8HUHJkri_UF7F7YrJVz4mBoD8KF5UqU5>!sH~EOT)q>*-qS1%39z#!t z(y*C^6fIIDh4a?fjSPf;4d+M9;mCOLE~Hk*l5n6JQVYA$W+FjH80WY1RUy1DrHH~K z22yUX7A+a-4lnOuYL8H+jW;jjYTX3>xpM@oLGY~gpE#$=M+=Wvj;o#H>Jlivf zt{0c$sZb?4Kl41MzcZp;JB+xKhBn-8``HxT`U$O(PZ;OFfYWUmMb|Fe=F*!rG2r=X z*c{vfL%U$IdRK^Smcy~Wr$Q|uC0uyQN*IOiXt* zRQg1pJ|DZseQFd#URoX-o~zRB5neRwA&<<5c-Nt)J?4H1>4ZMl2HaWUnGG1ua zSVLcfB`B-23$eM0;zwu9XoT)GB#kVEVVWz6m*L@f<33L+;&Z( z>XFk$$J^%N-7-a*H03iw<}erV!(6N`*C4nSvEMN#(mG;CGA^sRgi0-%X1ti1Z#t3k z9cR27R|u2+c66jC9EDLMIoYZ-ie;RHjH+jNV7UP|Vy`f+h?QVCWgJPZI)-0|SJE|Q zmJhe6#L8gSlV|Ujb;>8$bJBqOP@qLS<|tG2`I~6EtBR2cvJ`PbhwOxO(eA~XP-~b; z`UT3gpehAnn%g*KPglY6`4)bn=H+1-464 z$gZLELRz*Wq3=j~wJ2E7eti}z?IiJZSPZw7IlTOhvtb`GjGa9y)HllzJ7m`&Vyh3| z*G7qku3d`HCQ8&{J&q(wUqRuK0<|sk`i}#EBd;dYrr9@PyKo15pI0Hj!W!E@x|7@R zk7&ErjjE|V+Tr2!{h$6R#uigy8MO5c^`3-{rv0Qu0`EZd&mR?(827HG+e2g z|Fo&|$8 z!<;mAEI`;m+ zUB7gkeR}~TRON&pyQWa|_4BaW98J#qcY_8!#;il~bgkhl)*L^@-v3qHdSk}$sFWv9 z$y`cP@PI1YTe+D1Psn3Di`y&_|>xo`nqHG5#I{ZqKhFELkP|hK2vFl4YExE$6$r3c}_Ee$Y z;s>19p=MZ}A<#Juy0gHTeA^!3=PF|oy}yFD{f^=DD>K?BuSNpnGFCd@7t1on(J3Wc zT39lS&d&@d`$G>!8b)HwAG?TGw_`ktt|NH7d^6)QbqeNVX5;)I#;Ro8hd_H1fjiuc zl6&)ovh6pKJS7}EF8R{i53Zyy-@&KNsmA9Mnk1i~S+Olti)^}-Xs<>afBC~Qb{D<@ zqZMxvGSiBk|G)W$CDXCQ@g7jIUg^; zT#|8--Kf7zHaEbKea9IKJj67OT$Yc-A}!wY?!0c^@J*e#T&)FJst)4j&AS;x*_R%8 z-hl6DH`=P3L6`3(QvBEhf?1dw^egmfuw@p_sJx1QW}v^P?qbJUZOUl*iU;B(^7`q6 zBd30&>lkCCd%fj+vl_Wm2Hy0c#1@Cos-SgoGWM{!yhbr|I;b;mOYSAkcgP3PyL0nN z|41w6`rC{Yk{)67;Q3tUnj$o;RHiVUR6JPiK!c_arm61(X(xkn z>N$^dwN?}nHykP-$C7n&5^Y)R&pcu;z~UPaEjo(~FMA6JbGbSSNCIFzZWc&7>bUe`89Z4!wRGBbpq``Z#)X7zjX@ zZW%?x#U+LN*GEFgjy31=X8B;>nz=Zty_B=scLU?tOy=pzPSG<_0hienNbg2SQ-V8l 
zQXb0_UsnvE=l+|8Qxey)RaOGq(ICpy2%#}k;$fLHj2z1QP_@QYPX6j{+Y-RD|X*?Bkl2w`Ss;EKHDjf%f2|g zNQ|J}IdN3xEk$2F8W4ISpLajG4gF}Dpua&H*~b|Fr=YdsnXx6eVdXlgNUTHr_gF}e zAHzL6<<6UrSS?PEVf(16XX2{S7va+DOcT>tr;Qs-QBrx-eq|Gd&dU@G6t$6-J%}W? zoPyHd%Qz`arNmPWNZFuHRxVfZXvS)C-#;7+mE>v5_yk~)HLm=0N8W?!)MRgjgOBxa zrKgbb$l9PUGN3DOjzLHIZ-s7QAoB%cEGgj0-?nb0Jy=LF60a^#~ z=%@*z4L?9bN0&;4UE=6GE0o&IU_Dt`s+p69Y4&bh>3m-yH1`0Xeo_kGlCy9(H^pOO z*ghQDX+bTIa=F8P0(XM#C)2SepfWhyyt`oI|EOmXw?xe%;Yc^Hp7p2 z5j(S8TuEaC<4r+?Ie+6(GK$29m|NXv<~^x@zbU*Voe^#X_qXZ#>u?iNvZ{Zj3?2i$Q zmzm?y7rPpbXvTFrigSO8+o!bXLGL}xnNfkaYP$5^+K^TmZ01|q_laFY55Yo4o%X!C z!ylAfNbwFAxlJxFF=?qe_r}10Xz6~I+1r8xzk7ur)+mW zgg4i2U~sM)gb`z?t;(AklkW4zX6I1ctWK|6C-ZGzw8*tUgT9~k5iTByVO`4>lm)k8 zT89DNC=~J0rxkH%&~1FTmL`=MgOM&5Mv5+RbfBacF-g~P-&J4uGBgo)x9k-9jmYPW z6^D@0TT4!LqaRh6Z-R8%7J6Xk3U@PU(UAkLWH&59tTXE)%l&kV(qCS{%fS=q^2Kb{ zX&6WYk7m+X!+6U7zD@AEKN*Qi`Lh9^|Myn7s|9v0~>y3g0%Lo|(JQWBVnrUUUqh z^%|5oCLI^;{pmw+0*y~#o}%*-^wgygO{ryY@Rt$>zn@6SAB*ARv78R57vb2c_t2TC zOtw`$*rC8$s$W|$OzGxx#cLw0fsWg$o8Rp{Aw?$eX z36h^SnbO$L(^PL=;$LKl#vKku-JKO=ekB{er}ra!(0Pp9J{mPDV`$V~)}y%e2lpb} zk=uR%b<5Pr;_zwVXxulr=6&R?#&>X|W`DtmOlQo})}pt^jp*RJNBC94yh15gA<_2` z{3^`I?U@-p_ye*#qtExqcq0X=g` zt#FxYNQbM`>Frc~;o|2=+A)M>GpQAqcMrj}H%)w`rY8<`)*DwzfJl}vaG9KenM-ut;u>DIVW6Ax9g2i7M+}FR!U6vok-b=Eud*2@_Vg6XS zMUs9g4Ix+cL(orO!O>qsu26C=9o$;ZjrAHxUtWB|q^(~#H~maB>Pyiv=R6FZYeNNB ztf=H=ASJWj&$az)>D~hy`sNpiK=D4Lu$lSAdS~W~cW1NU7`k6RhR8{RCNG$UHx@_H zIb^7?y3>>PM3k((5L&@g`ig%rzk8!uyp50EJqSca-=skq1Fg6svx0vxmx-79QC;`JQ2XYN2<+x#H zMib2@a=&EDxIIPF$gy6T-Z|UQ;FCW5gHf~S+`MEV(1Ay%wU`V4yNoMp2%-^ehQ4b# zQEUAlY?-Kp;cIte$Cw+Uz3F}E!$1vqF4Uk`7n%R-XA5kY3nU?X3Qa1oLHELGgup_4 zD~$(ztj3eg+PHIjJZ0Flv8+`WmiBSPBZE3PUeTiNSC@q@162xJ%Z%#zm7M;;I$RiX zk83nBpt|Hil+Su6Ib%%eap?^dzpumcDr3qY*Pmt<*pqp=AveF}918ksP_4F}FwMXW zV?Kpa@n<%>PptK9Uaw0AcheA6y%VoTw+bujQt*S_C#xUJQ}L34LZ7IoIBsep>^ank z?r(a~{u4+y&-l>4evE}-c^W2p8l;nalRp}0O4{CL6u&N9$UnB0HvIa7P&+BwYUqja zeLcvai8;Tlim~jm3@ylEy_@f|7$bKhr5{(I@9$oq>{J7PI6ULOEUqx{)DNzApes#n 
zQT5E-?94s)pN#t5F|b|}iuwshL{GY^Dnir#mjC+w6TY!W#j(4}AE*qzQE+#(q3>5WOZRG4S9-3Ne4pcte`>rMC<}+~eup=yBL@p+`~| z*CWV32%n{tv94|w4N%p`^Xp1TI=%~YC3^7Wp$$nM-G%Efo{ASwpTV5fE4W$C>@8zq zPR9E+=yAmnF8-k#x;H<@w=17<#zUFbE%}SLL2aCF3gb!snoGZj=}^o$#+e?IgTE`^ zVz+DtIUcX%$i1Y+fy^k*eWI;;#|A6Xe*nIJ*~aXgZYRTPZ>@1dn8ENE)4T~ z_j6LQ2Ew~g<}ltelA9{qg7>~0Z>D+~XK$-f%{gsu+@o6V#KdvbXLdVU*0L@vn;GqN z5z(e>C8)Q5;ItXPB=#NyiQVxe1Kq1Vn`J< zp(JuH<;C1W$O2cYv>Zz0&T{=d>NK_f0^SsgFnRJm?3|K^`zJN2(&h!8%icxY)fX_8 zy?{fP;?U6`;28i0RFn#(cdl=%ALhryqXzg88Dur#_tsl$eh9vwRdQnwG4|~_MyGajn)bO zu-M*+79LQbndxB^_G%(*0|!%c{dU|L@|*j0y%dQL1E|Jx02e zt8yP_5A{X<(0J}kRF>E;Oad`7FCh6#pPo56a>l+h(4GDSQ_sAIN4`GoZtuX^fWi1u zs7~SgW)pwIl74>Lg^@a2Fd(A__L+MTG;|q73;K;I&-uc|? z@xe6A#*>Qky6|!aV@r#gM8RK1(N+x(%P&e`@q!{uEAqqd=b#M3CP-Qv(GDL;I=4g< zUqlWlm>%4Gr;Y~jEH0z6N=JNW3-MH>8^%!%)5c%&-$v0pi zCC{%!zSxBPMYU{?lZeQ^wse5aQnNFrQOuo9-1p6iNML#KwP(lBi&ewuyG9u|KI{aX zjxUEvz605>S&aoTx=47qRyZd&1yRa!^lF7J1?S)7y$a=N_~2QB**UwRs`Zp=F7r}<-r>5XB&m95a8ZUV^QHd{*7mLZ=Oe3V(-Q%P23=4e{z#ns<{A$xcknxo zAk4^|K3$N8gdRs}tS7w4L5aR;E;8L{e$$uSHYk${H;+DP+u_%H3woiOjcGv^2ui9!t_t%6 zM-N2&qZ^!#X#&*l3Wyy)fQ0?Gur=2gx1wXXEgRfu-{Mwoc*GCJ+*hM?4>xXOjy!5F zy+D)O1MFsR;?+O?qW-}wGTy>6GE0}z=r_8wQ6>sbOA;af_#Jka%tsTOM;cg!G55O) z#pkr5{h2Y244Z?#TdOL(IgmP=DII#dl&+~ig7w4q_`Ob^a+z0NXTn+7*N^4AmM{jw zI~!7{G=U=P@MVs;Rh_;gLK3o~llWmWqew2< zh;)-xxS`e8pjsNseHm;@#zo%raD;$|Z~IW;SpgL}3nAUab^%3>bYoWt$#m`EHnMwS zq5EH?>aVT%$bKF*u11mV@;f+nXg6k+S`*j12ls-)@mpe*kbh?l{8lqYjdef9Q9Hm# z9@eAnHCBSS@fJS4x{OCb0kqu1k7|l6h5wijA4~thh=0i0*Rf1%o&vosb`t)MTSWi$ zhDvt4!?@?8`HtCUL}sq|t+gJ@89V*!eM`idfZC0hkisK5QasiPmwOo%?~^}4PvwB{ z*&qc9UB>kK=LhcVk|AQ_FR>W1dKDbaCc(b4n%i}01WsSH6>M!^;rhQo@!pT+*f-pQ z`p;IR1eOEepqN2lKF**D-BRHSjw46527jiwV4CVS>~wY|?J5PTi8rH+b?ohaWG0O= z8ji$V5wYwMcJxtaOrs*K{N+cR#_r}!-^Ov?#RY5!+6c`G4f<~fzTNHz*W)^qUVdYL zXrdGH54=Hmj3)hHuB7x1N0c`3xSw?&&vt6iz$_{19GA#7r1c}|?#Xm&h8CUwaTG1j zkK(;;Gqy0^K(%`_@9#g4YOeL8i`rlCYcU5)>;=EkgqzXC+>lYsf9$Y;7N+08nM?1W zwpWu(-)mDA<7772{Njuw9O;zaU2&|j;NPny0HeyPLhUfL%} 
zvi?kO(gUtbZme*jREfEz)c92|yKt&xFSoemJhbzbsBTv$SDSf)JMbcyB#ZxG=`YrA z`cqiZknTp;1Sdo}-sH^dgRv=t&G1~96QlPo0X=9$4y8|eC|6Vu^ z42E4C^S_#TQrOE)+`SMF&+uXSSiso)#*uSH;y%ppD3cT#&MBdfT76n1;@p}R5LM|8x$IHwi z!2VyJDylP1m?Y&(%;y~qI`CR{j-Vr&0(DnYq7Z$I-aDCoibWWm6hW0HyOB0<9p||s zoZ9mOgt?=CVZ67LxG1g?dCg9=vCm%=m=2)+^U|sQ;}{aT=L;&sN)fWU18x(o@%7Y6 z6doQ%6>KMy9M+%gQY6X!a4;<{=Abvnk^bG-h9_^lVUvFnmCQwTLH90q`Q=T~2kq%d z3x11te>I4fy@r%mKWq=1$a0}RyvHFgB#ECuG|Z58OjyMI>*#|wKknkT(1<3Mi~o=3 z@n2WII!}u<**?B>wmIdo-pa(}OsI?MAcGa~c3?BBxCjd14QX9y7dHFFU^?TLJaak7 zS<8%}=aUj}@KhwF%QfKg&_DRxu11$U`w?GIgRwzss2=7>j$_T}@3weS{GP{6NgqIN zui7AeqE)OYeF$G?J5s}{2RQtA1N!{bMO>UNC9h89T32rrNih!0lzFk740jEuX6w-V zdtF@Czd$Ok^dsk$%o8s3A;YddyxAykx+NNcFYD#8Pi8Y_l#FIG*>SXO^%E2|O3|rM zCMG#*i}@md-2JD`t8Z|p?FK_}Lo(xk9qL+WZA-Z>cq>m^JTV5WN_6|@IU+-J2sZU zlJ#x87+;YO*vt#%A8={lZwzl5#f@`nfrd&ejJyi?$9-1Q-SWFA-^v&Z2RHGv{9^fE zkrvD$w+{x=8uU$2#(HT_id36I`+lm^;V{OEc;Am-w!R)dwt>Q~#bJ={cBBP0dN|I$ znbm3Q5g8LfSMDSu?-$2qeUInvX}Sq#^ig!uW{K<<- zZpI2Z9xNjn_yONTR#VxOQusxAlcQ-nCaqx|80O}+LpTLJF-E3_6|KL!9fL;(;OE`D zI5A)hr9Y6ujqDvADSzM)r z3w54wV6%G%sIjh<^95BZzH$@F5_kDTrTx&GZBP2)l9U^ngo(M&x$>QkLQ48Y@!>-S ze6YoDZ0=Ix%=?vKc#jNe7sYU{krTM#TjmqRci~v}4@lbSbI|vqUl--kcAotkr+Of* zISF4U*1IOq;TrBd5&FH!zn4x ziCSciAkAKhT;vX-&1NAmZ!5;9W@F@LE9$fV31-(`#Gs3hkhV>LnNuWotgz+fliX-_ zDa-$e%0vdvxA4a08JDxph-8m=Q;7E!YG3oJvfUs}-+Zzv{<=s| zzoE+n*H0-pu}Y6FKT*Xhw*ZlPxD#~7jlh&$8(BtS7dJ@X5X&%J_>VI(#90s*H8T&5 zl{Gp2>_%^=4b=vx)8403Db)Iy5V?LoE^E9*pAE9u5txPL`ZNA@$+BST>5r&oiRB1uYmmXE`2kt3U3zI)&@x;9dA?l9xM! 
z#b=qvX}cOpm?1aA)08d>_`u_ei%36bXEywv7Si!Oj z%uSe+IvvJR1>DQVRctP7Ac|7)<`RnXFtKQ;SR&;#+Jo&##&B`HBd9rht#CBrFcx{86M6L3aV`5MQcOlJMyA+M|e~y^`DA6T@HMT zsTFl^jfCQbzEsS1zTeKjMP0rP?TmAwoT7C2uN+6)%W818CJT?HSXc9e9(EYIkyC9i z&?`n7W1_4MevJE#uV9@!L}*ITCg+l1fsbwFe4Fmz_>z9u{A~y=`RPpSm}@+>K#%6e z9LCQt6_~7LMW*|W>B@RlUgM-9ZnORQAl4696n>Te$Xo>VI-Oied?|8zK8rRQJJ3Cr zMVGpl37spygu;w4=m{DW=&VKgOKk<6-H+fEJzg*vEJmP-3Z7U7&^)PNGV4_rj;OZd z^GS6|N*%?EUv#20ybndJix5;cttPj~dZ=AtOi+zsVikz~{u=rBIr)EF81QSOL7 ztar8JZY)L1s*s%OBRJa`@b)}&!G>)Y0sMkIErurV*G9cv zh-kUJFP)w7i0{2!jXRUZ@{wK7ah~}R8t>nNiLNa@ez%>nmd4ZWueXHdZClxQR*sCe z9fm}3K5n%6P>TaD9~@ySKPSITJ$}CIz?o<)5CX381*C`nJMgE-gy+G*SJ&np4F6KWKYo#|3R-R z1Ceuhtp6U#C;H5%^UeDZm%fVLd?Py^%i$k39LedxvN3cT^|*R_-V~xj&vNBm=2))=&NkWBh!S(x#bvEX9w8$PFIn4wZ?nL*b}9pXZnagNe(9 ze+x6Y*_Q^=kXy#sv!CS^eD-2}`D(HlD`FD=S=&;(qR7TKKI3DF7uO)po7&7oO0GSuDIy{&i7ZK?Ij-MP_i2mnR~g}#dTcN z{7K~S*$!vQ)aZHRUyROO%{fj>f=bZ?tk6A(mA4FO7t7)rdQPDhc0P?QUqC$vtZ2yj z&De1^2Sy$i^yGd91{pd~SJ?tO{Y;-?yE-vvi5HHpJc~tnWxRE{KlQnD2#PDBDBk!U zrkb6>EGtzih+zHfe%oM_Y{bpjphZ)(H0Y!G47ksYpykD?<19of1#?<;Uc#m=7F?Lu08Y?e2So=7QP9XLI3*1vo6Z<+jcz-4R+vPCqoqlA zxDt)qeS>>A+n1zArVAa$JbuO+aUX3=FvV;dUFpq0??rox+1U;u#)Q+1NI_qpE8;fR zX*e@v5F#$CupW#r#Vo2v1KXAUR2WL~rx&6y)|U+3-l0)x4ju&EghT!S++uUf$}|bu zn_rGAFLq%1rADN_ZbFfYyKtu3f*z0()jHO2digi8)$%AOK4U>*)nW8i@eb4rEr`vF zk+H2717BLxvK{I)iuIF+MSkLzv+R2493=`2JHty4kK>|ett7k3d)V-Dl=wp15Smw! 
ziYv4mpUdA1vt+{HB497 zPb^01e5v}tLlhPQvj6ZEB~=59Qs^|Wr~dN@8cr{*y!s9EX(LucyDqO$nOM|vUaQJb6<+Bwr#k7Hwd^(<9 zT)2P-eHklYM?czn%Ly7T88Dq%!!M2KL$mFCaF}r+GZ!)D>Whacv;Bk(nj&&6SdAj% z$s~R21J1FYT6bj=%9k&LD!UuiD7K^Bx*aOR#iE`5N3c4bvFc`)3!B`uXsK(YFeiQt z%AFs<^0flGe(KSNAX^gHygBb)KkAC(vG3zWtX^+GzGvDYx}Cx=Ii1hvwff`HZ)LhK zox%&sb4b2@y(q|~66=z8b6ZC&U78{SsHX}Mi1mxr3!aF-(sxC zIN|HkCj4lQLHMY#^s|TPeE$OeXU|Kxm-MAip%3_}Yjo(!31iCXuoOaV){#H{qW_Rj znEB~9R}^f>?=&+<3d&gC)B>g@oW~`d&!n2&6IOV)iIB&6s zTVL!=vtp7(WHF9{jrU@qV=8IPX3*eI8bDXKEv9% z)(QjKGen8Z=k4W2A3u(rhURo-dLi?f_|uBz{i!l1jl06!I1(r3QDS2NxpYUtr+Obk z$c>uAmts-Mbh3V$K#g%kK8L?!fOa-k&t_TwG5>h8bwPA_(?htviz0K&LX3U)oVnUm zX;{l|#?3j*^0!HzZ+K|mFQK3}?~%s7OaE3!b3OUR+}Feydhoq3MX97< zQ{xuTc(xlIFRd-8`!2yC7skA}@=tu9y`{Vo^eA<6EjP_)EZt!Ii}hoeo2u5H<}{lL zYk!R;?!KCk_4o+9W)4Ng5ys)n@gfDI>yS^JLN5oYQKX+ALXPGlefLDZe4R9TPgjE0 zCRGY-7{T0Uj0f)Bfwk&~_<=pk8OLY_RkEG*v~OqFJMkuNr!B;&`6KAm%%Awk@`IbZ zi@0K!Qz)9IP9HZN7VJhEQ%r5J@L}C#jP>on>;_X5xN6Y7Mnj4^UW4)P4e8a{>!>L@ ziED=qNq1~5rZb8R2MmeWpF3nrs9A5nbuB|KH$#ohU4MOP9|A?+9Q zAAS2N&_mYm$zgBbQY8xAn<$(z?!=7&>xG&*EHm~y844x7bkT1JT^wq2+{DfmpI3~w@z}s1`K%qarjE$ye z=}9CSAxCRQRbj)B6k*cqMC`j6BS>~ua5DWz(%Ae0&W6pQ`+YiqWFejWRF~qG-75ao zHs)SZxyA)nyhOFx5N^r711KHtOBS22;X#xq>9y^kQ>T*{f7eIw{4*1i+=kJurRn5v zcoXT{IO=e&#wkyhy_w7yjej;%(~Cs-8_83tvLD>XGyh?$23EDW)41z|=~}wjcEbd2 zq2G`*%!-^?Z{?Z#32yOEOU`HBDk?j+gj@UFgbJ9q;@G?pZu9pYm}2WhDe<|e+vHA7 z5h~)*NG-22sJS zCrG+@o$dTTpmWqtDZD>&mWA`QYYnvVuP)yfEv5sdt7!XW#toINL(jBVDa306?ZP~;VT_dM5-rR5ur9utWtOpKf0ahs4S1d!OnCh zc^_G(#ZyjshVZ0w3?{3((X+jq5VG?IG@JdY!~PAL&neUF{-?00F^XJ=Ovji#O62@# z3u0!E$0U(GitNWwq+vX=57u&F_hun`>O;IfU_iCv+mI@s%mw@#OF@o*=TpDZ--JAFy=B zMm%)9%1f|2<=kL(a#{8k{9|wMj5T&OdQtf=TOT@Tuo)>9%P6dib+#2>AXQO^-neU$ z!KN}OZk2?2nLSCw*-+-C>4<1(;AU1EQ==1`KlGc!d)e$jRf`L?QV$HCE``&|7hF`Q zhj8G>V8(H-5lspH3bk5mzWGWS27T{Oi*Ih^hNwK|EX;#w-c;u1^pK(bx4T8pe|gaj zWm)`_?%=vVuSA_F2^-gXQ;zsG4xLb;ti#>#7TaRUs8z@@d+U)D1Df%DEoQkq#(9a! 
zG<#DO@-7ah#+A-wlC%e=<+?OGrX0g!0-(L)47!>7=VO2-IlmLI>_!#-gtg#Edl`gz z;W!v!%g@htr97#Z{GOiUT-@oqSYg=0eXzBpmFzvY$LbpEZ7}~+VK(L_g+Ot&5$)?8 zO-B|savd?pMLYD4!>zTO`Oi*?-i{KHyQIK%6g9zN|9lMgupyNt`S`PWG$!k86XL%I z;F7lveT(Zuy5FUQ;c}|fhdGTpr~v_2((rRV%LR6L(SkWCyjougy64vmqq`2A+x2=_ zjFYA-Rtkc%?h=}cYSdnT2anEEyvdw=K76<(wun=(+{22@e1_n?tOsL4hS0g+{b}f@ zR~WlHkyn!Wfw9lBneucE&VMtY0~2hKpBqT3%nQ8e`Z7|C*$C%7A4Eg_7gMmxB4Lx- zFBEAX6Tc`pjVmi0Xd9D0{`#d!GsdJ*W!nTAq?#v8@<~Vc$$w~?o`9OE`B)AI+IZm~ zCVkYSI!%^OkeW@E`l0aIIFO{cbSMl|!ue_Ekowu5<~&wI(E%my3*(BfsdbZ{-s<_C@$>MiY1|T%x8M1F_kTLT=<{le`r+?Y(y5cjiM29N=wc&dOt&2-XZ*>}Oq&$Fj=w|GcbRb2aB@~%p!uIMPVe%*r*Qew|YNI|kfBGa! z8np>){MJ%M!yOc?_>Sk1s&sRRCf(|3fY*L`1hV&9dLiSVMr|OiwFTTZ3kNEG^#u=> z81feF`(apUL&}{>0!Mj@vML4qUR=ZHXKjV$qsnn{*$vm!;AonC*%8svDDx5BW46hQR$d>P-^wS zME6O!w9cN-UgAPG2eLc${0^-7!@4fsl_+;%bD8X4eDH!4gxFb7NUInJ8FvI*OVE95 z5B|n7*ZZDl7~ozFx0*%>^(U~4&AyKt1@d_-m}_JAT3+$qX6|ub6J$(Hu~EX5GB`&X zp-=(2QghlKw*vC9ixC=aPyfz()3n_)$yi333wpU1s_e~FB0GtBuLseA+@oCILsgg; zz!)?094N_pHx8+*DggY+sVYO9@AiSn5i8#nP0-GGzBdk|;#VkJPSh5_Ai4 z5FX!(>VXLqU{``^yIrXNfi6r=R;EwA(sZe7;eR`Mgsa+8(%c+0XPL6RW;Lq+_|w-d z!=RpZo-<(#veI=GSUW+FBw}ju{)!bE8!Nd)=47rGw4tokg_5&>Fk(w3mn$;_M|3_y zY2a;yWXjW9#>O4x589i^Gn4A2Rq1}8 zZd|)>fTNMS(H3FHC;E<~7ruLN>eUjGZ~BD4eIH@ngT5ro{M}^|MOaXm$<;p`OdVIP zDPjLG7;j@c_4(SA$L>NgmiKtyz=IeN=SGL0vYF?mE$ES#L|(PI@NSQS_?Ub@zNz;! 
z7L_gJj|Nm=n1n9H>lliHZ{~APPtKxU!`iU>V>{A{CU9;o;^WH48T(d6Wt1B=n`1KUn@P z)PQON`wI8vJ7C(GjT1?eNPX)#+O_>7uUhZ}repshFZ?05JhKLeV&tf9pSMsHxRfM2 znsNB*7pzVT;g7I6?cVg~Tx?`4-sTP?*@vcR91pT;pG#s%9pbLEF&DvVvFAK#GM>6u zsF}6{N%98N{7w>lLX@ZVqvhZQKX_yYFm6^DSDs>pzuwb@n*~qtX3__7Ad5yeBb|%>`fsvTUJCyNkQsHmwwJxs=tuXj#?hm1?xc53f(*X`Oyu`u7t0~A+hQZEE?mLy#ZFZCxsAI% zRw1C`2EN=f5HsHmqsZ4AvBGyA9g1$igF60Mk2R!IOO$EH*Q1=lAIEx3=XvPV?liZe z0)`=uxL)}WBV$zQvF%prhdvJI)f7pQ+;|WRIJX0kP&&HNNh z$I_~~*R2ojTq{Foc3Pq2Q96d)^%A=3_2}c*7}&R{QP_Y$s`h$@yumtT@(#SmxfGoV z+*7lNchIA2aW_^(%3T|*_a96dxj!-T2In;?$Re;p3c=KX@9uVq7v^z+W~yk>j$w(?|2U7sKZY|1u7`{Br!1m=t)Jk< zxeMl|&)B7xD+R|RW(d1@6q~Yo&|UIDkX$^q+b2^3sZ;#feK_~`p`dS(fh_Ahanq-< ztb+StcJ&>`uALY{dd0=?(aWaRk^b1Qyjs#nu|M_xutP{#@e;FlpC91yu@SO-rg1^6 z!KPSCs`-#d!ic5hYnLqs3>k*a_Bym}^HDtgn1?SZK@@BF8dbHNk^J;7B5yCI2^Xdy zX0`>*;q25v6*)AF@_^OiiTwRA3Kdm-@$_;b9=~{s_ZimYaAXUVPnWRrJ-yiqjg9PO z$xWulJJ{nAWU0I5S@!x!1-@0u(TLsqFqw1tyLNGoOie3$w9uOzd8a@(&xZ_a6FJXx z2VzPcC`Tg!2f34S^{jNxu;uySI~`aVvI#y#*HQlXt>AlUEHx~v#pE{$^vUQLEI3zk zLvLf!oc2;*lN&~A(E zC=A|%zdbFebD0h88stl51Kow}+}&8JYDdONjQZZ={oUAg>=NH={Z$TQLYx!zn{))8 z!*XCY<&jAJ7U1{_McQ&li9f4t!k7ymq2d)TE>CQQ#oQ?FEeW6p+1}Jo@w*WJu>}$z z&P1E8%#2ts+CD**k|Xp*SCb^VvhXezShQlBrWUTJR|%uu`GE0zZm$Yua+vOoYmM_r zMNjYn& zWlS~apZn23pQo@$(V?jaJ|oE`os7qiN7FTZ5+@hp+2+^mjg2h)c`jn_YbU((wZ@g+ z`FN=K6B+e=DD{gJo_DS>ZwnRX*=rK58M4m<^!LwuRKL<1DBy;-0ie$^`1FPZ#BvHgXPFHLL&o+;2NZkjui& z_@>{NM*sSO|32Mh56Y^bQBfd$DB;|kYDd`L&?d(cY^(18YWGopy`sos=l4KWE~Pe#=UE zf7N2zR@RwhNF|8_sJ^fhNAL2^SN1L(?fwWk0p`@?YCs~hqx!u$5-p!gP~!Q--ve(8 zKlD`L)j5SCM!ZC$Mv0^?lCv{|jqEaop3Zy&qD z)b9+Uu%*K#R(^xn+N&9mm01Co&^b` zVydYv)i;!o+>aDOO11dF&>7M6V4m~6QJ;>l_w>Y1|v&mWfa zPDU$?6qn-PTXpg8obe>a>ylFXOXTh72h9hiSh#9);|!$%G&Cd~x7wD_l(&1a{N5|D z{i>9ru0ylKk6^!d4zmy9t|87+8I?oW9;<}k936}(*yf$tJL4i2Nui|@kTgEL)&bZN9sJF9;bf$B}4CBOJiEMAUym%#C4mbMEIN?mBlU2pRC+>ed?`8Ai9!nZ91W>kb^As?}&HuS#n4(8ek_#b;~ z^$(tF9J&-pd?+lx>Pan=WJRg=7S^rkJbJq~v(+WNXf=1R&Rf}ldDFT7*#9PWO&Ez% z4HLQ??ZeqX9xTW^g_UgGj3MD_MBl@uZ$>i;9C?7y0FGz<>DJvu 
zax>?-q^<|(G&=0FsrOGj`B5Ui^_`3Euk9(FbD!7NjikWi+?}nHLeY0Cuw$n(n-dhz z-K>`4rfc6|bk$RM9k&;&-1?Hua&+0j1S2SDrj5}y| zy!!t$HmB#v=z7C6UY=niGL?nqLoT?V{1wsXjA^vVRmt?(=~$id0$&C{z*K&pJGkx{ z<}NqHzys!#qaR71?%42|N-2yAGvO~@!tJGN@vN6MO%I(+UV#?Wb+rv!x6Z>7xl$P9 zc1es!2a^h=A?Mvn+PPvA&d0yOgf0cTe}QwMLT^KV>JxTH)`!OFB4HIGzg@AB&LsSe5oTT?Bg= zgr^fKkt5b2>6{Ba4t|EI^KW5zJ5-I{`v%<1gTv)CJ z6LsEMHF2S%PmiNly%nv{yMQc%bZolsK=QL0nI?@SW2cpD;{COV(l?;vZ--K94u8kq zTf;V#)nH7-TKpK~Ox-;S@Ya#zdk=3C_x=fo&mYcg@##saF-^iZO?8SJJxgrcx}W#; zc4G2?38dGb?=5Y%2vZjF9m3^r{QS6vHDvL*-0}bL)3H;inUz8d4%~)h-#ZL@sU-|D zIW24)Fon+#4?}B}9@Sj>z>c37O!HsQA{EB>BwtVAaA~&Wm3J%N9Lf}X+NERUQZE`m z-W@qUV<@yg_eI$yQdswu5LJ!;=f7lW0cv8L(jV-3e@*b5wiPcred!4A5!muw)Ra$| zG%i1aHh&HfTjloQLhM~k?ntA?fX#?64Izb-kD=41N!HD(bYCxyI&vHlHQ$Q%)~~=o zjSysXFTfkaa5||v5S`yovW@c=U|P#PEY@+L>!W$+X0bD>YHl;p!;KOW4PY`$j$Tbw zC7CbFSkSdVtVn->y7!lHLs60DZ2N)2*O7F{qc{EHGpw!QrgV5hD(_O{<4foxXbc>P z4&FtcYY|J`6t(DAdRD zeyPuw9851^69?Sj-S5A`cgt7j=q$1>5sFb}IQMbhh=Qp~9^Aoiac9ec5g9ZE^Xjq&3?3Xj)Y29?LwYZm69wrfdy$Gxvc=%4>BTY4ML zO|n9s{3WO!??Vgil_`+%`+&riN=Yy}JNXM7o#dEQIgctp9FV*r) zhXPNi(`u_IYQ5)*X^P(D>aiCC6RdHydoHT>htmG&)2ybefu%mL!02_Y$l72)B{i3! 
zR93^H3Z|02OGl%_k+GQdf@e7R+~UxZd(3-8E|!f_p;Pzj@w>*2G;AHHe#t&oHpP;f zAIu~c;6UGx3`D}4axC*RqX4y3%)DLPI7?*>9l0FL_ZLsFt{@%6`6(@Hdx=ZF4yO?B zYP|7Fq&cIDap+<*CR|mZWS&ucJ-C^(DhIQ@9}Q_`gBlHa&1cSS{gG1Ei3hGG1e~pH!lbqSc%MUCfRh9U#`x)XhTjjR`BnjLeEoB zzAwW)mi}aZ`#CDwIBVEd4hiNBFt+SL`zN0e@BZMsi1EF}AKQL1U-|#gFuIHNoM%d> z{aq=0WD9DFv}sLn6NZ*ugqEHOHJA0G9ge4jXBm?kySEo`E;Zk+m~RnMUFTByDD2^*c61wCAka!smQ8WBm?(x}VtLExCf< zS!cMKUBkFgHL_l9jhg)Fv~d0sO1ttFKQ&JxJ(6SUSEmw%{Aum|O5iimiD#J{( z29weh{{0JFLqlv_(Clc)=J#}?nmzr69F?DV{o}+5$=Z{!at$VBlRcQC>O`;WN~mOR zI@Mgr6E`dzfqvoKThww84M&U7Hj?PNx5zV(<`h+Y7p|%c>AQJ2Uf{sqc_2Qjg&dacf zTE`CXuAMBOMZc2YjxrmfrIw!5cjgWjKz#P`E{=HlmOhxzzmr}9?>OjSIlsfrJP=4=%TJ7 zIV|OTp&1&&8>``nNti)3k_N=^eCmo%dvNBw6VB%jqK8xdBG+1m*iLhpM)EmytTv7O zStUAJ$WWg>{-S#6Ll%1d1>$tv5bw{ung1*~$LubuhH}5DsT7*9UdV zyre;Q<-Z6oUw*>@ugRk8`x{txeGnXW29U0~H%SjY5T0`0Q281I%0Aa1S;y~x!3JuS z<7ps1{=Sg+-_N60^-CP-+YJQ~_k?`cVQ^D>iWkEa>D>iASe#2HgI}qXuK}e~XMezAo-^6sdpitI8;i~9f%G}- z2;QAZBH!A2*iXEUx123Cr>JB#1xJn6!8CF**= zlZ}hog+RxxQqNbM!EDO;d3`h~W~M*In0PGsf;P`QhEv~`!1wAE zT%P@cO&t_ML;bEG_y_;(mG(wW*(KV-nyi=3hO z1*3+GjnlkL>D=e}=(Nj6sQL%dy=fMvJ+YyyClu)04qI{Qzz10LUoY{q`VY*m*2SIi z!|9LfAZqyATkQX88~%&n3^RpPVN0S3xlJ~q6K_Y0TdyvpA-}#t_0&JK2AIIL+K)Ca zjKW>xQ+U~~L7w-fp}sbbl9W>DggLyF}>mOWN2Yh&$(e3OV zz&+*g1lQsX%8`OUb> znhqbvlz?yC-?tC?XWS?#Rgbhsn!u~O4Q=6GShEmU8ZjaaCGU#xX>u<*Wgm_Yq%Qo( zOsAZieP~C-1AO3Y2pWG1HEz8`&%MJ*cFPXP_e~=0CMhlj)#Ka}&hnV4#ChpQP&{i5 zGwoEThBzGx$=U|v+8-=;s}6OPcfvJtv-I2%TkPVwBg;+&`t1>d6#D{plw!oFkpY62 z+if;)=oT1G7A12->Y>}imiFsuvv>Lr*aG>%WRuj5cBL55pObT0_VppOwoa((q_}XbgN-c@ly|No!IZO2E^G|*j z(xtI^1Nr`OCQ7o#&>Xw-m|Alf{V$!t-HAGw`_rEa*R(_a^EqJeFl^m-2>rIoQbgG% z(X`_a_N}uQU+ep!ugzl&rMK)e@41teBbhFF2+1mSa;`gyta1AxFIthGvnhSNzEWtY z&uDzkpV?d|RhoaGLV9@WND4l#i5vCYp>gnwG>Yd2t@e~d=kYq;kCtmXb88yPi}mQT zbvK&ftt~b+bm7hNmieR-~TObi|I+fO|_}b zudk@8xtP*=$f9pQ@J5Z6>hI?F z1_SX)XffKyZxCZ7YAG>P{BloRW4R|)KLQG4R3J0#*kXvD$%>e zlr7ZQ&NAn$Vb}NmWGiM_QS}-{nt0?0`@EB)w{NMFzv-sit69nsPi@Iq&kX`u=zgKG^!QX(++&877 
zi2rcQm!H}H9>PA}=N#I^XMt5_l#t^`B_@G>qa1hR;siq)&}|USQyoOj-XGX?m+Mg6 z@~&~$U3dDvpJ#Dr3T?HZ?wCi6tZGKR+KgXq`; zp1J$dCiF~w00j#@x?=ZGGQZrA8ad1P^RaJ25@*$$o4p0OJ%W|F8k#aD3i0EHpwavq z{CJKv^>{e$B`&5uN-OE;Lrq$%SB@tQ?&4IJHP~}{nHU>*gN@kfMrAMlV;v>Ybbj9+ zD08RR^N+y59Tw8l#kGQ4R+aFz@I4-tZxa$Ts$tK2(7k1jp{If`^>Ztvgw!-%9oQin zeOZJP%Ce;OpooH#HzL@CbB8qd@y|q`W|?v(>%eqMSj`>5Yx1EdEv@2=8c?9{BQOvxv z9#vP`(O0(;mtNYC(`3Gzb&Vs~xlu{uVj9J}3*ocFk;J`6Bel%v%G&No3w5O@naju} z&xKZAdWx4j2czqeh;<(<#N(#?J2ATf>64Oah4~}AD&3C@`?M*F^HiQXRlsAWJ=2-l zohDC}Ck^Eyys?>1UYF(R!LM$law1+5-#8rM1EOhwTMuf)U@U%`&1V%ZqE)?u^vKD( zEa1dyj5-p@zB^yVa&JpI`?*S@Gi5)!IdL>OK2W064imLc2=jVZ zLV5IBF{$AebK{)0n!f&$GtXzT%ays1D#fDw^cXDP`$AH2?9PA{=j{Bxp8twjpS*;W zQ~?gV-RSqElc+Fsp}}`5DM)=0oqsJA;|~qOw&^`cwBuaVrByg{BZ4CQ9$^+|o#mBu z;Na6_QvDbOtzvtc)w~{xac&5dreR+1ndGq$h&t+l1m69;7x@Vvf=y|BL=A>JmPqq_ z#?tPW?)Tc)r?%mK0Y>kNk5- zoAp@Qc|naQ1r9f?!OtDJM>ZdD^wIE7dvV{{ua zh8^7*NxZ4!e$W)%ezzO$R_cGwHzt;eI*orRK=U~4={hC3jQuPqZZFT6jS*G zEloXX*6A}a;r!>yD?RDpG-HYhm?d;MR!E(Lx1z6;7A;O}65Oq))3!+k?AwaFaCsHX z_J8d~`ujIxv1}IVuF5pk9bAO!T}Jee&(bexD~kR}?eJ`r7awf9h`MYUP+EDvJnk((WmN0688^WMx7n}b4>h>lmCpdf98B)#taRNEp39w zF-4kvtq;HR#nRuUDRfi)7s9?D$MQZs#2aBLh+kDIF5qsFIf4CXfvzjFO$w!*8LKeP zJckU*osm#KSkfnE09AL_lMX3u_dP-u_X(PG4mboUQJfw-ns)!t)moY z`T6AKhNJL5|ODTRdR2| zvLt1inq5GVXJw(p_sX*uYtTjG2Pe0;S>jv3RQi5}=TLi>qf<4G-5b`Eto-D~OWk$g zeytYm5fE+{u0@KTA$Q_TXR?T*bHXSx$$F1E4+*JESSu7&g_B{_Kykd;PIz8c!mZ)v zY(GD*Tdpq0mYhDGESug~ z-xLc8XN0ViTL-4H853UaLErAdn6h#h6)xjDnTh=W!CqekR_(&ovps0^fBQvy`FDJ- z@JPr%@{sLGdW67EUT1AKq6KTssjt&*^h!0O(5(XUY_{MC&z0OzwW6KtK=u<1g{zI5 z@xs)MBGwu9!<(?W}(8}B&Z#<{l&w710u8t)V7WVdwsQYTO8Jfr)uI8Z#NJPlrN zvqe?AOYEkhEBzgMm?=r4XkSACZjN3{ts^`!du2o8>h+g}mS9bxdCEoJ)m-Oi&u5N9 zYe$j`|8Bl5@*r*BwRCQ68hy;zDXus^1MjXXQvRVl+G16UMiU>3f3y#3E#_3X?iLya)G&J)GN@<{W&C@{l%mU`-a%qPvm1RyCe4w4kMhPIO*1lGgA%?j-AFl*aGB zYljbm-=wv8k=cV*uJY#$TyL`VO(Bn00d#A92SUv!AgKpuUtaPSBkuW=u7iNpdy;9> zM4mm$zJ`M*w1}P8q>1($@o&a-mOkE%-V`ZQmqQMGm(L*$$#rbrYeJR^ryHm3n}?lw 
z(R6^jn-Us%4=PlLc^v|o}?}id-lDEci#`Q;|UTbb1{lm>dZsnmHu>l*bnS)_{A<6SL3N^ zZDY$g9Xe#H^nd)&Sn{W}&R;N9TZ4CE#?qT&JACGu=31_!*d+Zy7i<*Z~KzLq+e(a zGozg=>oIrfO|0eqzngQpkIC1aeAb(?7N`B_ZLLG=?ih%%d*q>I6ieCd?+{e<%eNxK zoLYTyct30d%#XeouWU$#>u!5Gv`dj5UGoq-T%RH*OIPeY?Q z^d*V7d^3hJf>uz;KY40Wyp4vGtHNs69Bj1A60@zE+3#r*a*NQE_^h#Gv3XOVwT?Rg z+@?Z7^^4@kxm0Q2SIZk+=XIjBV4PIf{WKcO{3$w@J84TCY3QmAbo4c6jV~_~Plp@9 ztyGWPUhKxOWX=hTi6D#W`*7I9`I(&m-#%|C{V0ya@?IwN^}!M(bw*&+K^4qM8&97z zO^|cDgWWByz{tP9`Ln@i%Hbu5b?GKZ$4ww_F_87*&$rBDTQZ;i7CQZ9v2A??Li=}M zkL6xiX!fS=js|q{EVCoy2~QJlSGMD-;$l%~B+*gpE5G~v)$ zOx&zTzq39yp6ouE?B{)EhI9iCp3B*#Oe1oCT!9a}a^btST|91{jP$D-G}T;=40F{) z>(L67veI3wDSHbQ8FQ%j8caisLnwt^7OE^S;!S}nSzqpwUJ5m!!^72S=F2GY=HeCf zm2<;dmb}E#H8&Y)7YLtES>wa&V^DstODAM)VaIpS8NU~k;w)u~bK8Ng&0mH0xw&v? zUN1g;(VK1f<4bL8WLWfj&Teg6kE0FQ6z${3bK`lEm;d?E$o&U}Mc3Y;@4gCY^Ui~i z?((HuOZVc_9!FAIluwiYte}ImT69m6U|YL76>k#orB@l23<#q2mG>aW&;M??$N4!) zRBoGsnob*vVJq<7P8siH#-gKr5*__K2TOJuK;zRI4Ayyp3Qa4zxT%)^2DtE?!w5E9 zsf4*Zzhf2Ad|1Mn3q7`D=CO&=S3>bb+!CxU#+ zyV1Wjxp=*364STVqvX0H!j^btsK3aErv4jg)AkCOYnaj8pgPIgxJarr_u<~X??_K| zquw))OLwFXr@)h@;<#rg;4c5fFR7cNw6f#xZJ3MF@h4Xo0bEFZ~Br6D-M~2}>{sfBRdB~Wct#Dtv7f+v!#O`L!X_@^R zvotQkBTpNpU0dLBUXPUZOT=SSct^0yTC9Ae1j7;U5VB7LW}Zfr9cn{z_itj}4Ri7- zK8|5g6|k-{C*$>fNvYO{j^l>(*_5N$n9t8ckAmox_gMP7ZY7(Pc$K?7R@>q(%499M9eC{C@JqHIJb`waf3hq3;SDZMk%pdWEtq5gL`b1+w-&Agj4u4Ose ze@E3&L*Pu1dbELxgco_0|=7Qx0&1@7voS}Rzrz~DZ%<`! 
zd@kaA2lwBlIkK(sd)PpSF!Fw^LY6y?=~K?{#t$3D(NU8H;%?7FcsyV-TY9&fbgIEP z+RZ)1b7fqp*zzZ)*q&p~HJmZra8FvFp+}#mIbrfvZ6a%;j*;)6nW;>}>q6-8R%-;A zj-*>wx8V0N7uQ-Z;9Wv@1k3x9)&d#I*m4OEbv;nL+P-db< z1^RhH<4iuCgw<2q1Q?i8ds%A1GWtoKWKKN z(bc}#QYuira;@Mq-js?G;bz3g( z9w)LT+y!LNJi;$`)+m<0E06c&k}&b(OqiE_m%KO=?$;#wAdR{51&5YDlb%{7z;Tl+ zDLfG|ey9_Dj4Gj=C&_eGu2ih)u8+MhdT?j)eiS)v$DyJTq(0>_nk>2 z=Y+zWe;5Cp$%kyV4^pQlVy5~$3h|4EhL#zg^sU9~-)|xJ(~@LA*JE1uG^ylk1pOOd zE14>2;(WUeO}-|}nZXa4VM7MyJ$i!Dqo>f{+L5yJ)M(J67DnxsbnECus(9>3Po~XB zd_*NWU3yXB%K~hwN*B)do=<^1ZT98pQyeQ_fIS5#pp<-9m{2^1N`J3M594IIwEZlu ze!sgL4z{M_wIlk#|8%+?5Fc@{_U8{avV z>C={`+f0Rb{`?EqiiwA7k$+u@r9ZmFHa3pp=dN{#?(ItUe-y~q;xPL$XENT(_z8Ec ztmyvFB(P*nYV9$M)-UGF0nwUf@&42m*D{RkA4uz-ofp~NtW65gGE6-j+Oktysf~u zMN{apWC2jeDh6jL`!*mAReblrA4D_$~;ysoCfXZ1qbKFkhr zm1@jFHIR1a{Szcx&R}rxG3kAqHbh_dqV~Vn5G7dC$ml$Jb|{H#w;dN}k6Hy!OBFh! zPy$CjFE`u4y)pxzA#;)j*{ZjrIbt>)s?LLKvlVSJPvbLzKg?lT1}yYIp9jY4G%v!D2BG>2(LN^?T z)@LtzAK8Hy4R_eXC|mk)bOKGB=0mabGmxXc6-z!iQda5~Tz%D_#x$>>bJrc{o3RXC zv|El{ZB6jsnIlZ9h@}ozgTStMdd+k3P7@x%)I*KT>ea}#?g}!j6(LMjr=2|v$>V+& z?Rq3-J-k$DPVHwH2UZ9zPlsT`xlqz8_<^vR^_W^z$$r%;(8~we!cN0<#j)g#R40cU$X_of>f?+~eHMvKIJwutwTl;%Z=+so^qRHM)8er#e7|BIp3 zUu*b1-Ism@DALVEiU=4Vj7=zzzRA}o`j~>+hjNt7IVovgvV0b7O#=&W`CV7eNAiep za_Vyk2R4?W(44>1ej4Lw1t`S26$PCSag1mA&h~BOxi8N2UYI8S`C&zEPR|9!!x!0t zzRyvu&hun$eD3?ima<=7M3;g&ZC_IZnKxx<;4`<^q6clS;k$2?=5}@ zbtjASYiPXtd{VJJAkOMogqC^V(b1JpJ?8Gl>G7V_YJM5oRkHN-Zzp0{3f)p$#~GQ9 zq!wAgyRffVon!>6UksxD+rF}Y3Er%)XC>zGOmXygZe^BvPat|kzdSx zHs-JyDURlx^Y&3}KwTD6gI}P%Z4bWZ^rGe5L#sPHh9>-RrN-ttG{)MA)NZBWK=U?K zODyTmrfGOw*qhexPoO)c)+8r=25;X1$UIz!(+Ywx%rAt-$a9A8++=cBZ9@>>KlS>i zM<2LvXr03*to`TD6l(bW<{;l!cAtg}#Y77FCP%^JxVy1WnUFHA7~~X5ISSh3zIg$P zOp2IBd|z?i+p~@D8l;jr-?NZ(MO$k5Tnej-KJ;uwCCfE<&yGhAr%O(9lz&l^{_O05 z{+S6?EgCGy{#g4HZxiU}TUm_SonbD$mRN^{Gt%sdgK~1kbGXH!rh3^gogcxBho1dpL7-)E8nx{VJ>uX^IyDJcm^`3 zIFoJOZLI8i36D9W#JXb+bmy%em5)2fayTzaVRk*6b+e~%w(f}&86Xn7f?3TOPD&JJ8IAJeaola 
zXukShJQ_a(d#_qjk#$d6Hh!Ae`IPUYe={KCX`|pQ}6RPG7p?S)qP#CoV7b;}Q@$n*D`8b+OX;c0*!w?T%y>*Ra`=5`soiKO2S>Ab zM;W%}V1c0Fegcmts|jnaw!%=~f%mCfk-E={oaYzQaQQi$J$qeDTQvb|P1Nb|=Ny>m z?}9?gAj;5qi}TBRa3{SA?VK`;W>n9B%S?Q~0q(~CA<=UuOjbH_ z9=|@d#lK?fYqV)?ODx&@IMXMGD3tTeMSL$i>IhzqI+GE?+Y3vnzp)uT_H4t%+EgU* zuKUZ(zrv4SBWYJn3EE>*`1h|Kai(WsK1h$$m@+MHY{s9M687$q3O#7;!95IH(0sBd zj0|*WPV^7hXYCc@F2=!hR45hPSEOeYi}tKc_QyR@RGsN1%`Ho3%g5K??g}%%ytpk` zw#0_semcPlJ7nN|p7S#@enRn$F&UX>u|HM*)NGn54j#7$drp0ljQstI9ojsC*4h^1 zc)0`h%$K3HX`O6}Lk4cOdkMXt_T=5%c*mNe-0nP$%bL4zB%m7S4P|KK$7C_}k2$sH4ix*i$RKja zH9U8<9C;soKMU-*#Z+!d;Z-U}@*5I_ zzpqEqsaxIP6W#*3xG2djT^lmW-G;E(HPAaK+Z5j?1)HwuQi7ZYcQW4*yd?6JJI6xY zdF%|&iRi+pAegpjjiBY*?g?H-JiE*}QiJ5$rB7CUgT*5SQuvT64u7f6c*qN?w|vpR?Fe?uccZqugrx^#Y18gGbpDPmwf=6zx3#MTpFiudeMy<nUORu$qwV3F0ertPe8s@mU{^L&~v3cm>Zk1tbP|+f#O}}SEmG( zQZpLQ^EbvzIG1H5pa0mt#r}XTi2CEjXG$8J7i@*8DVj7ijQ4Oy4xrf$vyiM(4YkR9 zR{Nt8+iN|kE7v&5gn z=(gKfQP=b&w))li&0ki^W-3RK+L7G|{^do>Z_3fCW@BvrJrUz~)HXUi=X~u6Goi8K zEfzEdkyF2)@KLg%^rb}-%kw!1P#;g%oQ`7G<4S}Y)S&f?2j(3mYW(sa>i=K z;=$*I&$y%QV5;b{rZ>F{QWM`iImI%}-(bo#7u3bt(T~d(qp?$fjiMX}65F(2~9Z)U&Bs$er>Ke($(jw5DD% zq)V0_Zi} zN1tX#?txmwa$(ELJUqXw+oo*6m(u2b{>I64!z8oRd*H_!7dN|QzrmFE3icZy17 z49Tn_NfaVuD3S;fk*TE2i3+u!r=lp4A$esMDpX`neCzl91solFuf5iNU)OnZSQQAP zpMiAdf)tLL^l)7oN3oavmW^(AqTiB*$SexyK05E`hWAB~LiTOWU<t(Xm_% zJGXurzs23{+1O=2kZQiM9KP*rYV9|W)L+EX{OdOKY-v2p?H$61aK=AjzC5oS2O6|u z31!_IKq_;)@JGrYZVS(1i~e1{cC8457`PkpPwXl2-4Ap-ZRG-19YyV~W#R}hR-VUTEcI3($s%92i(~$a zxrU@Z-;I3Q=Rm!A9H|U^2HX5tteH^-t8F^)UpJJ#O#O*Hw<|F1PZkDFWc#J^HcVQw zLg=^1o<7`Fp<#QF97B_;FMn7ZD5Mv6{D8$FQ1`OY2NF_nW z?fi9-Gye4iJ{M%^RL(6v;L;i{_k28cvu;~$*In_NWCwCNoPpJvd*EO2Nf>xE3Cj+d zk?bRNdTMSiysuTJhezy$Ol#)c{$~S~(utJ!cp8PKD+%ULA0l_%U;L107uSt@4=>W8 zvC~ro{&WI8>imGyBY(j4p$3Nj8AYbUCqp^w2p*KmkW|D5D9@Nh$Hnnf&pft@T2GN+ zmnE)KDnRn*9HDB!Htz3AFVdUeB?|oXR^%@mhZjd2u}I1j(+f;EyT<1tDH4We-6-P9NoL*Fw?grxNGsP9%G{{AlbU(SUl<63P)^flJqh;u@pGVVVjd8g^6`X4wR4=u`H^q*+-=-t~ z%N6nd=W{7I$D6Zeox|SMEEn|Y4_+*)c)@2yXA%j9wK`T%Oq 
zi=Y9G<}@f~G5+o>Mx|R1n-!%p7YcZ&wRR(EBkm5>eY%~mn6k-A2*l^R%CIvdYy5& zVl<@;ZNk;j+LV-Q%4PJ4=*+o#p5NLZ{kxxXo7(nrd(s2w{PPTC+_52e{lUo?_M@T1 zVR(62h0Z)Hy2gIDdwzngUum&nQrQ(P`^hr6$!FU=IEt!&A|!kNE;o>Oj_!EjL z?HlOM_uE|SvQ1p@?@{#W@pEy|;aBMVuTp$QXoBL;AvAY}96h{mNOg>L``s^`Dy5SI z``bG(SWS_x*d2ko%o(UkPNGX@Jt&KiCyj-FFoxaL##kN)Yl6{^Hana;C4;F&i!oOf z^eM_3`juXMUGy%f%QC)?p%EEdGZta{0q*d?N8H)v*SJy?HF! zl$If4E91LuF(YBB0#(GQ;9`g&#W~HPmCnP+E@2DieXhgC?p`QHFU0EW9`uyW%m##+ z(+&AfIKJP4JIyQ;-0v2zAHRSG6<I??k=j`W6U(zU&{m|s%BwTw0) z&$}A5IdvUfvTEkS5>2SBy8)YgCh~SC9AJCik7l?2L66-o=$>llMjce8`6X$5=l366 z>2$^fSbI-w)O8Les~H1hWIET-5=HCT|EXB#CEl`~t*iZJal}P88uRP|FDrMGQ=j;Y zi@^LX|`kzG>X5W;Y|?1M!<*3PE@C+ zA^e+|3}5C~y`6Fn1}`_^_ej=H(OLr~nGs~7`4*?vKg3b9Q+U;S6E;pyFz)pW{#d_3 zv~BGnK4;!?ZvXg)P!$iurZN*!9%4&7Z`Gj;`qaRDNfRz+Vf73r+LSzu4$qoQW8bk| z17m-bj54A^bc46#>ymR~?!&VG=!sbeG8eB)6CZ(R}#%5VZAzU$z zZe0D%axiz0ZTdv)dN6_S-LMlcZJ*+To+K@=dBlkx#go0td^)p5g;qD7Mu_o3zV_x> z)c=_#s41M~wz52j?#$ub*wQhisk;@2i`USQc4f3C`*IJ*jO0J=-pJ#5J9Jyq`3+Sy z`1!$^%H{CWZhI19mZ)m=P*9843X4k63*78l`h zn45ie0vYcZ&0H_?6wa9SHN7LbS&XH3Z|W`hmb*Cdf-wcTc~t(!j^ZNb&`vpP z8qzf!RozFBANd@MEfZi_J(%vet)X}yO}g6q7H`H5$IaNwh_w2TzxpGXhI&-tkKqam zd-(uAM+<1pS7Z)*1F~t`2S=rLQQvqAvNL1dm8XEygC+FDR)bazy^at2L;3V~sc76W zku>y~6HFoodn2PcBby8%YwSRA=W`J!r@J0YA8Cn4O5ecb76*!XoFYmM^g;ZPU@9r? 
zg6uVAx@9|bO z;)on8+JATs-h7p%)@>|1;e8!*Jp<{mqaD51-GF{EcJw=L2Z9yGz;nPgoY>_F>*IrH zQ|v!Psa0U^$!bKXzl7)Y5*T1<0tj~Og#s<+;k0ct~djVsz)sX6E zLgyzW;pC^S2oN_4QzSCj&CQe!hI~YdqN>2BsL+oiZo>TV*EsoA7aJv}lDlUJ&0A_H zyxs5^8Y87i=>8;rZK6zD<&!3eC6kxN0jGI<;AkGs&~$d>%XE;l)|d z_tC)k!b9B8Ogk>4Oh=q&as$Wy)QVf1UgM*-DM|j4q!Vq-;k;xU?KriR5`8m-6EioX zXve{F^`H_#&X7G37S!>NEI`aNNr~d{G4nlsl^{l{4HSNw*x#!-U8n5DQ>G0YgD6n+qWS6K>&OF9TUZPE5$!YkX4|>x27tmO-6|$?81kLp!bh)^j z-3vQ$__Zs}h&Q3;W+W$9;7Oh#`{C0upRp}!@Cz-N!b?&6H(4reEk|S4U(Ry8J)O)N zLFIi;P|s-MIP!c;N4!gS=A8D7wFX zfbjFKG$c)`F(6P0QN;sj@f~~mUQmX^_Jb+x;ALnobbyt$84dPkOktaIp7~*Vq8Axk zPM<~54F@G()JMwjW)f79iK zK5a$%<|QYL)BA#^n+G5-1k%PS-c;D|g!dcx8=tTAV8^TiQPJrNe2M>uod?02kBMS^ zw+@`GQzrYNuf@5QEqv^+;gATNg~n0?YCb*%B2P@pH40Oz^BdmjNpUCqN9JxaC5yK zNg8WY(_Jg(S4*YviHuP_c%=}0;1$b5=~KGuKW=zgB~0{f$aB+uY+NQw1!WRsQZt3z z^5SDZs zF=KM^Hu?_c%wUmH6DO#o#AeA|2Z9boZ{k0$X zo81S8pbET~{>y3EyyIWHiKyaR8~^#39maXy#G?LQa5mSbM;lDYy!;}LDBDuupS^f= z*pqS2?dj?YXWD|X6yI}7l)iE!=2oba*u|eF7J5>PWD6JfrWVI!!Z3E8IsK4JhR3LJ zkX*q$E+>N!GF*{1&($LH3w!w90vU3$?6=mR zf-stVK%3UjXbSlY{OuE$jju;Tsx$2#phgEw2hrTg$+X~xGwtjR7StSn;QFV2wAWw}6%V|L zu|Hj?(B?JjeB|j;*E{T4$?oL2_Gl{}L}RzJp5?F?+#t4>&)>ySY_&49o%}cxxp-vB z*JCrAvn)4wf>*3l)e?70G-j+5V_ALYoQB>(QXF$Gd|V|iZW)D-My)s^(EzQ9W~A;d z#rTdBDgWyLnh-pif}+$&w{ix;zq78sX%`A#&%{pFAsuctjqK&SQS9~_w#Mw;@oXFp z`uy^mnCe4LVY?6zH-{#!e#>0a{~=DRHK7eL9t% z{$f6uWrf%tyhs$~?u&C1hmnL-FCOpDh4yU;l#c(!8+$l&FI?V=KY9GbLf33=P;xe! 
zOMbwydpo!8O#eDO&9xmszQbYwOlxf9+~W=D5=)X8{l2 z=#%)f0_jB=pyKF06e~80moUEZmq-UBe*F(A3qnZF_&M%dfc^{jpd*(L!QzJ^saT%I z47Vuk=*eI_|1?NQ3?kDT_t6~i3fmd~e@4wMENz>C*8_rhRgnh;H|SBq^3z;H%75r- zSH~sBfB2j4Mpv1m;@x5+TDqQHiPsLne;@RznE8O;%TJ)1Pe#l-w!k=Jz=Ak4N%4F|{&!&`lb{uNM8K?PO97&hzP(=AH_-IY%CU#3v*P9tad{`FS-RP0S zDS5PQSF3oU;07}tAbsFQ^gOELj=i+&)bDq(dURY&_oOOqb_t)3Z||?)yU(3b?wGrjBr@DTR~4UhOCuK7dAE zPQ=0tP0r417e#narft{m1(DU4O@2;pO1MU^8Fty?PKTq`cVGoFCWx2gj3;2UAmf?h*wHC zn2Te8V4dd1_k>P??f5iw#VBy@Yp!GTR9nie&*bD+>+!X!6KK&I37XPkOO=r$xPua7 z=+;GVAtf&#Ri__t2hZ*1s-uF*IC=-Xc6hLTlP2x;_dh}8Nl2ZCG3+<_ z@URIK7}S9^ehTC?o=pi=tPv7EgC3|d=Th?tT*?#}e@7QirtTCb^%}LV-SD`O$Zb6= zApfI2eY|;E$Q!9bBTAMu+#x`mKvcN{JqOVd0?QnAp+uN@?YSVNO;)wl9C zpEW7&p9W(Z&J{Y+mXb+l7dpl~!(0nh44>e@>xG!XG_L|Sg)+3H#vbm+7cw`~YO37x z6Nf$iLxZ%H@cdRPtoj`j9QIt|{*pHpU1Y_{1d zURSL|+F@P!2;SpNTvy(&H2 zZbi4JmE!Zq_oDQrgLs2#4IJ|e!+{)Y8rv*MO^ZiyAOAapmyNn)702>a9X_Oc(u@q| z&Er%KWe%6P}^<9ODeLz5(u@z^u4IoaZ_N8kMd^Gi|fzh_eUwNIk=N zWtP`Sz9K&6H6N4SL@|f2Cd-i=KwO_3x>&jW@ED$dVYV3$bq-<0RpL-vh@LGLButve zZ5+(p;jD|ZIEJ~et6a$Ycb6!oWg^Kl|5v=<6(rksa(89yxz9!;sjHmvu5XN{pK5v} zvuFl>b$CH>&m>XeT1m1p55-+J|GXlaPT!dy)$a%Uj6U+@H5#07-q}3{JBzuTc)I_dk{@;1Hde>g3^a@oO0En7ae*e|K3t~XWfEbE8>L%TGcSww+x9+ zW2o%B6P0~X5b7hYW2dbaZPyFnLzA?~_@o*I>@X7s%~(n@)ejJMqXoT*wis`}li&4g z430|Q$G%Eg(z&RM_r|Mej&d?>h>@bGj3$=j2@#&iEyRT(hlIs8Qt;GuC!M)Gr`bN8 zPIhGC$GB}&FYAqEITuCh^V;}|!qy7eq90haq#svsIt7zu##3bKH7u_Jjoz}G`gw1r z^;X9PpDE7xlgildb_eL++v~Ve;!ZQ=Zy}D&gTFX@g9gitE!#K=`a@-?)oLbsw=tLf z@BaAO8ch8Ms^W{)GfqD22zKpOBYUS2w0~U{{6?MT`l_yq_r=DN!w_SHUb3T=w;w{| zRh8((^gOKD$ht?X*P`U;a0)3_r{f*dY0+}#EHY#r$1X>z>WG8fAs%t}E$H9sDd;Wq zrS%KbXpwCQ-KcyE#X3Eh{>RRGV+&zfxi{51)S_*`3d)qJhIe)qjBYEDZM-6VWWI-2 zV?y~;Nrq&9RGVc91-y=(PyLyf{JN15nJK69d|EidGZxYE1Ev)6F%nmQ)o_{j>Uj4A zU-3wz5Jbt&#p31+E@Jv)$geP?u1YDc#^aj!663tiKG%lf%>!xR31jYcQwWuYE@M5! 
zs}Obcag%pia#PQQ(7{15P+r54cCQwZvLyU(rb6doTE*h2s?%>N?B_teRv$ZHo5`Fpr_Z?d7eS8Y(>q=pA(U59}kSC zHsQj_H_PbEBX%A}en%CVz{J3p@z6(MawXf9Usa>D#_4dG5KR?36DTWGmPW>Q#ZvTWkKxke<@kdcXGF3mHRz4T zB=M=mdof|08{N~dM({LOx-s`KCGSY0@`VS5UFt??Y#BlB58U>B{{e=~#-QKou%eFeRr+taCEN zPJF@9kG9@%fyvQ$TuXIiPFOa(Z2!i2V-mg?zl2lgWBg%%!?m@96HJbB&Fr_Lq;j*cyy%Wg{&^ew4X?1#W7m7H^u23d`3g{DJ~_`Wa$ zGhTtdEtRIY6H$2f@GfWez(n|~uEIHvo68NXya369WG>@i6*xC_8uqEyt8x#x3&*FD zMjZRMW_`!4(~B#Hp7y553%j^kg>RY9VK^GPMxc5BI6CKZ1&?m_C+|Col%deXnN16V zsdaYwmZ!r=>^L6$k#A6%S#kYja>2 zVoP!|UodpoHC+DO2JO;2Fk-!&mmW8GJvKx9m-3#UmQu>;)n7yFpGELwbFHO*&UC8h zI@SeQP(R^3_|A9)%rRpBwgc_%(cmWh8pD-bISi9wjDLD>hWP6bH#+z91$Xu&kA@@C z82{RmPV^Mw@i|L;yI&&MoSp$!JuOPvp-SzadwE?~4bpi*f|~9%OgO#{XV3Ukc9S2e zR^H^VE%=MIE9Gctz*SDW@Bwc6zsABccfsii+w(u~#DjnJ5XRex4Z4f?!NdBv1APZ^ zVsAf6K1k@DGLgRMM9_CtMS8TO86N`W#a0KtqM|56_&h%fSLJM}%~ctri`o5Wd?cQ| zOQjKu!XaZcgu8Cc90pTt1^u7T5h7o}?|X3>8+C`!hNDC@DP~br;#&WWjnEu-@(F zM}6F!vk9V|`?h1j!`qk@rA3870?ULoaH(=nxscUy{J%a|9Cei-ml4LaYNQyg@JL<@U9;Nz2v+>VX`^dxvT?M$?#>;I+0#EtPmnUnXYVlw9WTG8CGYso*+ ziR?n#P;x(mbz(E|)N2v9Bh8QcdltYMf)+Jw&~pRen#Ze_ZYS1M@4Fz0lTLzWNpjdR@NDekD-sN?0fcPW)$~lWC~|& z+<>oHX592Kbx>rv(!eQexeN1xXkz3TGC1CYn{3yn^GS}6?H52hq8+&#mPy==Gj6y# zMjI-+A&j?p54y%yWaP&7yfWHod1r~ACHB1AXy#+SwGqc$r779OpYrAuG3Tr!sXjIm zj;8HE-G`CX$TAm}qZ6S)0!-Ad;gi;I8a$yJ=2gu|V6*J^S}nMAuMk<5vO<@dJKevu zgT`z+{9NMk#}$+TJLvJkp|D&F&w6zjf@#2dtLTuIDr_6t_yW$Eh|Rmx=? 
zx?w*pu;0&&HUtzRSi=L$6@n0VWem-~D1q-i+c=dio3T#eI{y4MA$?r|M*Gw;Z)Fmv zvzECwdniviRf zun=V}*(g2t6Uv=ASZS(9={I6Xm(B1V-1?69F5b9mosNMETE%4xJZM=#J~YEOl3w&l zMELe%q5gN2M;MT2>uJ_wTg%Y3cr2Vz#?L%|AkTfLEGN%W~ zh54Urm%%JU%o#7%5cY9*#GAX4Dn^Yz2miro+@Ee9tJn7Te{RLR0BXtz;Qq!NVUE%aBrf(w@#HYZYtBQvRDbeiE-a-Dr#Z7XNhpw!;jir+ zMpF+Nvpt#&&11fxZ69u7s-Y)cl^j9WST` zg2Xtg-HhDycGN$}L0C&C|BrchFF!S>s8LtAEA|)gU2!<1ZVx2wrQSrkH;_0`pO%}R zK$;(8Q`Z>N^=Bqzm={8uWUg@!vkxGidQmZ7Np$^h7++wplsmD9uMdDjDN1LN5PlZanYwY;ohTGY&-QFdv@!S z|IU91vE0KAcy37fCbQ{5v<*1~?1V+eXOg&X47#58}(M<<#zT3!=^&Nc&_!yXGoW`%=a~(S6MMia+9oa4JNdfojAhw2!QH>#~dWt}hcRV0h+?M(42{S;?V6pD_{0QzZo z4)+tx$abL|9mvs!*`6_|i$5reU}t%D(o8JEcYJpArtB(q-#avdB9a^^DJU0xtL#Zr z>Ns9#1R~1d4ovD6A$f@tg*|+X=!UEKS;w-%?;as&AL}#jRTd6~xs${1r+l~44DOrt z4SZT7iED{NsOau6dbza;&(@lg|H^b+h~0Q8Moa@6}lU8sG)?m-hx zgolr6vEb4|l!yXo%`rc!meu5YpDNPdUImh#-&yW`rxn7=UL+1(AT&E}q@O|$<{qoT z%JN;JakHGo_r0vKt8O222x!u`eF`|&GmCN$&7+ALtp64N5R2WK#o6UwP_t~Up#Nzw z#!B0f%0xxhp<%xI36W3@SxLVZ6ZTav;?^n8VE2T}e7)uu#*Xn9UlCW}31>@hhJJ)p zf)QnmOQ%JF5p>o%OVI7k!1TrPG)cS}!o-tcd`hbNEJG7Nv%JzBDO&w4f>IhXP^M@{ z6Q3}~L?Rnf$F9M7<{gR{dw|P1UBNqhY(+%OJG3#6%t+%3*e`76%GW5tzhstJ>ijG? 
z4{F7eIt%J=;K{9ZOGBb|2O{?~xAq}Rs+`I^w(f7Zj91L9o-lS5N2R!;d0_yo;-d?wUgB%Gv1azmX!@JMqz+`;b|y zO_$kw!UMYO3jJ^KSb zvv4f6UP~yqO}xzwh`Pt6I6EWkY9O^Tr}F-t4s>mZ0_FKS;Df3q@~8e42R$81Cf7IM z1k1bIoeiY1EPJBo?8G=3`hv{+?T}P+qv>Z(;4R~MdbU=h+gXxGQ`2{yDK%#zB^RO3n^b6%9>Vsqh1 z?3`^$x09KpYxzu4I@ZDIRh(kq3oV+b9ZWZU+-St%Zf@Dm3f%JaLtUvYE%&Pi7afZa zWlw}Y<~kWRgmvF{DN^3}TfFpA=2F}2BfR~78r_#SqUeu5nVW;=j7cv)aYviZGp>e2 z;}22B=f7AN){ARmYvI}0Bs#|S%Z<#}yh=TZlkcBXF1f`6*^@6Juatd;9A0o^r597G ze*~%h)2F!!6>!s8C%Pf`1QX&Dg$chE@m0s1+BTGM&VGzTYqcBW^H$Phli?^gUdr_| zcqt<1kITc!`p8vhD`x`v(s%9#Tb%DzZ44GKbvMxJ1gweT7?w_DkQD3kzzly z?$nn6dQtEL|J{|QZ$lNSaOF~Z{DHZ4W1T2j=O6|Z>Y}b`GR~OzlB|6@XVmP)YmC~2 zz+3N-DH=rOyI9sJ)fjhM?{Qh$E>!9@4(juJP}8hT@ges`Q(bmq%E>moOsK}j`OFP9 z{Vxvcji*_SRur~r0X2*4Y2S!Nc*1;BKVrHdhd6ARWI)T*B53+zJ^E26OK*5~w>GK6 zGQpA`Z{tIs{#}BH>r&>Ue~-y7H?i)E3dJ!lrhIw<+dXaOJo5+BEg3`l^G*-9_D0cG zwg;&Qy@S5$^}Oq@tw>+MTn3X=N$UPaOrCK^bmPbx!M|aIXhDMoU-Tdu?^a*%(ms5N zdBytCpGU_bUu97o8m&S9{IW*M@U>clS=TyIogTFU8ZzOV0rh)GD6oX=HzhF7G`8V z#l4X2&@=O=4-Opljtb*TjQcR`O)oaIyYNSATM_+Hkz7<(3BUYSlYb>+z|Iuo3_p_l z=sJ#~W^yQ(Vp*JCV|p&30JT9=>6z>znp(xUEN4Gpby2dH=5JZLwr!hGH^Ch(J6&nM z^((HucMygT4n<;R1R`TBp!Bw$Tk@`hJKj>r%P{BM*CT!6S&a{IpG+uZ`bYek%vk>> z+sS+OGJ3mXk1%fL7Pw`0;UIG(i$N_mkzuIpy-PcTeKWs20!=eOgp@3O=U$xQ33|bhE2ol(>H*Ox2mU zn`JlmSZjE$pR@us536x(`CaD4cA}~0-=chX1b4w7!Y zuLM&So8_#T>Bzi-D*uk)`9|ZsLLmzO=nQj~jHP3G<@nV&`NX`a1X^c4{nxYGVc; z@;4a90sYvlT%YofcJTX;DA18}20~}{SL8b8V8^szS{+HuwOh+qod1eJ{r)0+pAmPw zibuAy0*&aG#fOF>RCtm(XMWQ&nDQ7=})UptbpNdHlylgvzJ+F zRB74<$@n_ahlEbnXCfi(=qA)=TacHJ0qWyJC_5t%+cjb+lV$WK7S(az>Lcjbk4}DB zLO0&GG>L|Br?HkCD5Xo8?WV11==?NFH}s|5M>h*`qZ`rOs70kK9iofGVkd;M$tokPHHSfS;1TJMBAF(XG z?|hAL6C-e2&vMgOeh{Ax(M67bBfe`5r0jri;&HK=C{B97Izt^OIpsp)+!sg;dC9d} zYtXd-#`j~KJcZId&>nFFk*go#?}hC+^V*u$iGwI*EbCC;l%n+Zv3Tyya&gS@EM^%N zvlA&;-WS7o?Kdz~>m42)>R|c7KIFw8!_v3v*dJp{-JzCb9K4DiC1!C?Y%Hi}PXR97 zzggkGU;~P!2Gcpl(jRer7bd^9MZfPR#Dpc>?Gs+ypGYM-H1Qsn&X^*aEz;C}B!YYM zej3GVfUY*RvbiAJg9gZRk)>YD8$JLVwyEIF_I-G(JO@v&2GH`zyBMOWOR3xC>4jb> 
zhN_8BAA3xkDdR=q>88+TUf3ar#?v(a8q9q@oJPHP#p@_;f|Ro{jb5<{;=$t>Pv|T< zo?OJYu3=OjavCXYf9<5y2g5lRA(^Iy`04q4F56}8F>2-S+3G-=@p|~!$p{&tL0QSB zH15SU#HS3QR%O;j;KDI=xjlu}Iu-()I3TW`6r_)bx5wZfJ zx)YGby4$vr8}WTxD3Si=D77;2r$gZZrRyH*MbP zL@rNw{*C@?B#62&_16~eYuQ7r_x%p*+(5o)(?WV;)P}85%*kix%+Hfu%wLvsfRh*V zc(Ivdm(g+zWHW;UD)XpBN{cz_>#?f!2sifoOLPr(6f%sMLt?KZnO*CLqC=r1e|8i4 ztfHyieHF_?9N}6UleuNj@AAI~NYko$3&n>9Uc$-)BH}kH(#_k}jH$Dgyp3Jxgz`*b zx9BN~9uJ^DqnFSqkNa@nOy)I=Dta@Uwc{Q&*w9dEbzI={zGp|+Ra+w3Wdz#?enF+d352bZM)wjq!M$msXx4wq zyky}wywq&wytZZ{aLg}^m-6M_TI}JPJ*JSA_De(@{fjL@0(ZySk2F#rai`<7p(r~I zPY$JEx;@K;PP&N0Bm2=x6}J0^fk1-dx)*{34IdR;oG|yS7V3qYto%* z0%L(bJ#m6N{$DfxzGa@Of$DU05IbXB@1Qiti26wu!SjF*4t~|5=Gfunxi*+qY}~}v zZp(n-nZGbHD-!>{`oiPn;%Itvq814zv1s>`r{wZXe6`GCJm~#=!=xmX53-=?ifn5B zS6Xn&(xjklU*T|uBHPd8VE!Y2;swTOylW@~vU|k}1|n5uV(#0UPGV%A99>+ZBsgX* zr1_C@AF zOD9ooa}Z;Hc+mU$D%`KM#qy(h&>rnbvA#O6UmwFy+LDPaMonlFOOdrC`wiKmhG8*b zB#~YzR{H9TTa~T&qaa5oF86ZrWj=WM`vrzAdB(b{YD6<7Np{U0F8`fAO_z$K6-w-^ zG~0%ys}I3p)+6-h@50C{ReZQ%7>%$|qN6KbVaRePxasAi@{2S#j;lGU|xPMzNh?YoxhjjE~v2Qf%w~k>xp(sUe%eiuHeEuZD ziXT{A_XlI;E4ZWfzT`d04wE!*bA#r`X zwdlug5jn@KK;!6#csDtej;Y>- z)ZE15^cplZx4=&}7cJH6PlRnXSV}iPtU}dv z#_exg!+ePIv48Y#A#wItq&?K6_Xl+;oaIet|J9_ygSz<|+gjWjdO{tqn?|Ybo-O`ROSfn9sR)6p zQTR%qYcF1nduJYE?lmo1{izxYSK2{OqMVa&3lqOoA-sG35?qJ^JyhMty*)4pTV3BH za?4YgRvD4x-yW>#GsNm0IyAWdY;y54rx^oMAX~{8AU7%z;*;W>QlAF zGhB&T1as+ZoLF;`+wnY@OrC8=TKZC2_a_g}KKDRswH9g2(4o7Y_pqW|8n>nlqLAgL z6jQa1RtJ}GXtJU;3tyq3Xb7L5avqMw&LnZ{E52QujFk;KkO(rP;{0sxPX0fU(PRZm z$u$sd;_9(sq&8{j?&8e<=Q`31pbeoC6n;~dn&nvA+B=Be>==jZLGqYhzXM5o!?2vO z9JiP>qw22?O$~Sqt3XSfO&E{)`%}c>pGT00HlzQ)FYr~EOw2oph1*;xt4UEP>PbgV ze-k=1g!zA0S|HK&Fq$Tvg`tA||L4nvpu0Gi-H77qBY3si8%3!jdF!d(q&RW}lXi>PJ1xdyQ{n+D9!8C^gBeFy_vDu8h){p8W z{phVNGiWX>=QK{0BCjS18B5uW&S3!->yCg&>IR`)aVFk1Ns`MOUE2SlmY=p)nYMS^ z3(tO>hA)2wLh=*}|LRIJTDR~4R-J4XEkVD`!nj>IU+{4!V|Dc?3stjMu`cp`nEmU+ zelK%grrVPL{vrtfHJ!pA=5_Q?V&_DH3pGxQpb*AkyEwlF)A~j5SE3)otutPDz_=Op 
zKZa7Vr302+3!}{j>4?mVrnyo=#VK^ZTSOt<6NUL_j-tBd9V}uu(%&4`DV$1FGvy09t(Y(UuM#;{FJ#`1U|2IY z!5rrG%osKd&mY`}=i_k{GSnL9Mig;3>k?q~O^lVtjcHLYkFBgzTdj14ou6*BIBg(c z_8I#uzCwA6Hgg}DqC4a##3S#aY=j)W)MI(4sk2GP+lpQ^$5Fst9cmdIhk=aA?Wgnt zKQ$uZ9yO3Y`^}_cCnb8iy$9Y!df2{uH*)zjao&gkx}LQUdi@rV%)fgWr__NLS7d4S zb|VVgbqa1?i@DQLt~7S96>XCmfM|tj^hB&hUjH!;&hKyhp4~a<4h1U?!(SbhXuFt$ijq|w4Yj0x9R$p$LWq+2p8AX}ytXnffg$xT1L-`Tw%Chd_ zzcUULm^70Pi)^?%dgmGEPnvH0vg318o`|28E~f8G9^nS#%J?nRq0x<5*x!YRWjDCI z9d%fDRF3L?&J|*PV`%83KWJ!c#dRACUV4oKc_)m)=?^OrzTAbbDX{a=z;>HQhh}~dE;(>A$+-N{8{5D$4{3WOE?G_r#({W?#V+?=okGhzADfd zb!|?KGa&1ii|I_OKc#1zfhKIlnFL#!-Rln3pKjcol2p1V_)=KN1Ei)#L5e~0WYk=R zgtHUrUDG4B`%7T&q>Fe|T8?v96{+N=0`U!E=84=S{KXGBFV~_a&yJ>? z&f*-dNQunj#!)PDpZAV7r`%mi!a$t>dSf<7ux~z$ix<1NjVc$oS-E3r)!UOe6wHyG zwjNE1H^s%p@o3(Bn17zAN<+5h0mBjO_mEfdI27Xwdm`L>q5+z{*-rz6Kaa5qGI=d7<}ae zS5|I9ktVE@8uSiQ#_}}wKpD_i0Ud+>G=n)|y0ee*YB}5amHmTZQlvqnhvtgM`A5^! zL3&=AHrHUTpUQ1~sYX^$H$fqM8~l9aD?7ibp>~lPDL$5@(7^e^gpqHdRv#^-vhT4| z{#?X6vGYhSh@M0XymiDUoN?5qT-P7G)`|Wk^Gchfh6V{0n(OG{%pZvU-Hg0$1MK{L zQyg8R2bZ+FkPMcfH*V&*uy-N(7j7b_CTZI3BcSooETQaJDrPpX5~N>ba>EDu({Gc> zBI7oHvif!acm9u~^A5=IecO1_(vYMnrIIER?Ru``ETc$@mQc!;m4wWO3Pss8C?s1% zp?dB+p-A}}C7UEOWGnPuzxRKkb>G)@p2zX|cx)!U=t-E+=Y-_ap|GImtL-)gh9Qn;d8+Q#{bnr*{&~Ls;WS~ zWCPn-Yb11;_JGmv{!rKKOP2HH=|)C4i}5eQNxcDN`>qftcY0HXp#}vPE@4TLs?^oB zfZB=Y`E#ej(S8>a-wozomc{tFE`n|j*+j#Sd2nyZ8>~2+hDl%cqq~u!sPib83U6IS zH=kuREhrxoI~!4K*qu@|zC+6MeBMheCCXpaDcs14G*<4Sr#Xx9WXBskbW|c5n$ApD z?POgGmXRO#PQ5&miN5+%7`_do(hObk=N!&_QrwQOuk3{Le;Z(*YDjMy=CiwpxZAJH zi&PZk$#~!p!ete4&9;ejY2!Dc#_BL~<;^hi-YT}B#e=4ixgHv@R}4O-ZAzA3a|C z0Ec2V$$Z&WtVlV7TJE|Sbm1YC4D2a>$^@a$C{vjH*O$I1k0nj_FnV{UcZE+@F$$f* zrgRzb{zne{PcFlb2${-{d+bo6)}0b_5Z^d9!r3Mhx?8;H@o7f8 z65k14rOz=~PJ>PT-tEy zoh;|26yZCZ2Oia6-nYkn!K}~&AZi~e5;;fcWysc-M$A0 zL!C)weIb7J;5_5iDx@Vhi5|t8(S{!@X-fnnC$oM~t;xsrzZT>Z+Y8N8$I}C&M7n!# z1TB?&jwdcdFmHY?-}?^}Uvk#cl~Yf#?B812KeZN*##LcHpWSBfP@|6q2XJ!CZecg? 
z1JC6%hM0*}*xwmVL-#1~vr%vQrF%dcSi;x;TwW+8+1Yb*2*~+zLg6-|G!pSkS zkXR9p6@?*^It%WVF>|0und2-+ZYV1{IEA|R?nB9f9!VZJi08-o(gXcq@vQp^L=Mry zFGMq!vy;j3=Xr#j9!b}IRA}Ir4{V3tE|g^55PY39Xu*9mY^v9$v_^ZX5I-ZUl<(3$ z1kv`>Zjko(r8zxrK!1M@Zd`wY(mn35>~f=~^B?i&=PT$|6;vcIy$Z8Zobfe{0xLd>V5YF7Z&GUXd*OK-d9!X|own$Of zWEGEaeeTRG&k*MQkVF1+M%gbKk+H{`jr^`kG8753yStG+_@}t=v?+#H_2u^^1)8yK zy4cZ&JH}_t6qTYLCPC+`{04_U>{PmQ7s?ejH#ZqJVY2j;I|&x1a}UpzUeq?{8uKYIp~Y9i>GMr< zdZgrq-j?yW#GTP%_C(BR2oV01FQkA%1Mkqz~s}v@EBn zsuGU-zJnutzV~BSZz@@G674ZR*f&2-Dx1RhDn*W%a&aKuCTkl1vCWxgHS@+S+k zJ%`Yn>^$7xsZ8U3d6H-6N!;pmB@gbU?>6lO(mcn|sQH($&pHvV`nAxVY=O0tTZ}Xt^0aAAXBF`)omT1<%#b;rW2+Lqy4gZ-`qJAkIElhWDc+Sj@X@ms^5qpGF_i zY1u0{#p{#SpM^rpD-}xU_yxyvGsJb%meGs8-!QjL#8dTDNwVM8ia$o-&>K;Mikq^u zm-WV%ep6|dbOjxHCr4@3QWzf{FF0;Jjf*yG#EvUU!px6u6ur8VY4-J{wl#5>XO}>s zEu~Ds%ZYWV_|SdX!@>&t|6rG~S$Z$289!&*P;%pUq#x)D<7#v#R$DRmKI+AhF9xk$h2E4YSog+DX|beB1~w&zc_4O_l^CvkH?9Mv2>Ap z7j*d`e zs^!n>IWql7cB&$&nP~96Q$NyCm`?3O9BA~&IdEB&1CP_%BwB>xo-rtEJMT=$S&>;z z2R`8wrqDVp@^ z+7i-C{KZb@451yzD)8g|bYZ~;3#>Euq^bn&beP0vj7c3#?}R!@zWEB*#&@zqx|QhD z<}BPu5FGGOfr>|&5Z%{;`hDDwNmZTLyVr+eimDJR<3z6#gG9%v z1<1S@NXc6ZU_NdOO61FV2POntO&Hbne}?BL@4|RQAyiLXM$==?vR?8;@HFLo*8B{i zYWWoQX*&0+sJr6Jv;mZT$da_iR3P=K4t*WGAHBRwq1UWTssJB&X*jwNris2sf#nu zyujk7SSzXf0|6Ear~k zUpQ=Cj`L|GRE^B6@NnLQ$n}j_5X?EGi};-UeH5)}TuWx&D%2)>7-@Nlf<@7GOqm!d z9{VjX+4R4=&>cmr^$Vu<`b_+8UP(Crj7>jpAh}t7M>=8F6QLvZAOV?EE`PS__J)Y=QozhBjhrl+^FY+aqv@nDH;Cd0(J+;(TRmNG`M&Zn#0br2_qS6 z=4_rN#`oE1p7j_R{u_q}*{~+fP55TffzFZ~JmJi%Ze7Z>+9ZTBb`2)gw+rahH@^Rk z9EAm66VW6qOW!uT;VA!m<*R0Mr%``0SAPq6B|98TJc@~zYlP(|_`lJ=9KCen$ej1& zB5G<7Y2K4Q#A}e>6z-aMc!&uNedwa6It485jo72}IEz+?Y=>ULnp%mFTbYW0G2Zl9 zSDTdbr$KJ%D>mlcMDb?9aQ5-ZKGs^WA3^D{?i*#VL#fh*3qoz&O<$P8JHn4D9=wA0 zBnUS|x&y0_NU`1vp5 zIx4YByAaI21ar6#%50aa__D!-oYM@c;>cNcYezj;{#|y^(vrMC+fkv}8>p8V)4v-B z@o#=BEr(uAl8214z@vGu`}Tr=AS<$sa?Q(t+}%iy7~m>o#lDwr4E=h zE|kVCT1Ux+Ds(ra3BND*c003%XPo^b#9bHi+`ndxqP+iBvhY~rimbQ^sF#jLFL@0# z1P5}*N;vDKQ!Z_gxsHsge!_f<=a?onpe=D<@hrMOgvLl%{KF;j;Xxq3K$Gmo 
z?SN``DLxiXqG(H5I(_Fistda?u}3^r)o#SQohB61u?E`{obWL*1zJP>$U9pDNz!b| z!_o$f&uhi97e-_ikphQvl@+Fk1Ie~v2~#~{00*@o%zc=R z=|^V6&&K4PWmrB=nQ zc-e0tExw>cz1{e;HhwycJW47W<+;mudlJvWb>ZcDB2}+)7>u>2oKXYG%Htq%=h)Jh zzlX6xGYroZD={@R1jrjij+^W8Ip79P%qYUE_GcLNzdh2LcES0UJ^4-%g}Y;u*{9~S z@JlvD_Bk^;sXL4|g+9T{OGfma&)$S%TQJSfkfh7GtLN)sw%GA5dpY|E{1*1))61U~ zD>sdz&HpJQRO3AUD;bQgU;U`~&q>6KN%(N}p*ZQ|T->|Zo7yfY)9>JJ;pMUv35uY0*KDokwrbhn8zc1yO zSXNf?;B{j?{vtqtUtOU8mDiqf9KTLdXBQXwW$A?>~qNf_? zC3AmeM0gjg4x2&QcfH}+9z%OI>|wDcRbs3+nIy4E!VC9qbn{q>^ou5Eg+&ddJCizK zr)Ws#IqBrSZW@J}9uXyDcH@I@ce;N#1}?lOGqBx{9v}UOCr*lFCZ|N^@v|tzB^e!$ zhSPk`Bg^33zV$~YL%xCYPyQTbl(S37^G(K5`P=vts6|l|D{y~G1;dD3mK49CB6HDv zOd0$JGT?V$`K`>wnfJl3wW49;f7sM%Nhi$zpjrBzmBt%U@{&pPVd5Yf@p>z4x;V$- zw*pl)L}O9HV3G-*P47GnXq~PyeH@*PZ+utX@olqoK7URu9(4eVR1;}U?qyUbH{nf7 z4@!8fK=SvVqAF4eF)s(uW6n_7;lGNu8y;m5=S=B&=wo;_?5(&n$Q8xwT46e^WEH!4Ei4u)YCLr+~tF4Fftb`RMHTeXhNx1nwfhZ_r;e5)4}O)knJc>`q~m9 zC()0lcHEMNAAi7_Zw$>ztwdVUVDcR8D%#km!?Vnl7WO*`wfW&V7hb}hPaDu@gA<*(d_P*Y{cIPT-8hYAFHXcJnI+`3o}b5cW=Pil zJSCZubY8F+B}YSbcL?%^XW`qEcit{NMRc$(P0>oG-uEJCYo7~ZQQB(Ey{bf)WfSTC z>^q1m;2e+r?NBt5q45he>E7cQ+Ie;}dI&C*k&*`wy@A;1XaNVFa~dtQvQZxkrPnqs zN14h!jOAWgHnALu=XKz^J%o*Hb)n|SV7%?dp zJ|#lPdqH$Hf#;ig#nAeuNBA|f8l7L1NrQ9qQof(WhPoN-C-&U&{Ls)u-ca6XMQ1(I@n%|X-@1C0MDuTsJ^=qU*{*wsZ&K^UzMWF9I9m6@Qm|bs?;sT=y&ck*us)j{-&_Af2F{o z+id<4U1F6^^l8sK#1FQl(L2{ec~K%Bw{jLvKIi&7&Y>d*_$7P-FTw$M^&NIm4u#n*Oq zc$%Ds!&cr@SK>q!ogTs*RT+A3^A*P$W(zL!eQ5gmzDC%NKQ$Zs9Y;(YmxxHVg>=wZ(JQFbJK z+RnCT4nb zTc=>%sYDEe3+L%HNeV{o5~TKrAY)OF^jICLe0vDzOQP5eK9@D`=)ttMtVf4qCz|WD z=;8iK7VD6S=bDX}v*tf6Q8A!?Z+lXnw+>P!nD8@a2>sk_Kr_oYhc@&Wq>cZuAY=`O zA6zMf^Si-p?sTbHr9k@KXW~WvRb=;>NjHR=e!RiI_B{0m<+F@$AQF z@~JLI-*HYfW3ny9?AeP$M*RQ$vk*8k8`Y-Q;nnJjCsR!6<%#zw#ub?Ts6oB`D`@)M z#+UHlLiBS-Dt1*7r@2Kkx$f7Xtjzg8`X;2e*^b7}eu$?P2DCr$0BUA#gY7&+^3xth z9&^7)n%sXdFY6-+&*@E%SJg?wIs2jVmoi544*iZJ!=TT*Lw6raaY}w3s&#*f?V-W= zo~usHX$sU4qa`|D?m-@(hlxAyKfpTc2rNF#{f`(!MOTLj%I{^UV|;hA{PCbd*r!B; 
zetbvDm2sl{$(0oN@jFiLx{Z*tUnCDQE>_(9Jn;W#y;nG!py5t`oNt)HxfqcYV*MLl zUUi76Hx_C=USQzIB(bKy6&?f_Q$b%n_@&RLhjE;dcVi_fuIr7aO|6pWnmj9zoi1GB z`(nS^z0$>#FTiQBHO*WoM`r!>Xz<-Ma{0skG6PPCea~^PgE>Ddm}a8(%03)u@+GU` zA8}^BI&E31Mt$_>(ul9iQT&E;sg`f#e%I%0Y`=+^_k}3G`$@JXq)-^hJG4RVkI~qT z_e~WF;8&N!)W;dIqUyoy*V*~-@Oq2&V+WAL<`~n6+>JNkkI{Di8q|HPNZy@$cU=e( zdAjtFR~U__iAjF`dQP2nIBLt11=6rC{&nGuVFhxOB*_ ziR30zfZMNA>C#r7^Am4E?ZP)W^Lcjeg<3co8^f)P?|Xh|k#Fic8gk?q%i&p$`^)nEWZ$|PnE5;lRH~1oZpVvZ^=%+MF;JlZ zZBq<&az*n~6Tv>sikdF$L};c8jXX1!wzx{s_+S{tyK0LmL)OBs+LqR3@bgdebOe4Y zf#HeqV^Kii0Y9pT--KBKrN_5%x< zUXC3GlQGeA5a(GvK>YehZ0&j@`u$sk+_l|lALqZ-=iL_Uv}I`CAwO}zr|T$~n}VI& z$J2lm4+<$A;odEW&-Nfofq(T`hM6V_ms?RdOJ6*j$oGw%zwlG~852}$B&#RQbax%X zQH2vO;i0_-{XU`t{g*M+wt5=v2v?+G^Ga}g$m)vH*9~|vV~RNJs4~2=4QNfkO=kCG z7Oyw+_lE8&>hswU>1qWMP0f#zJp*nEI{IB`^xrA$Yc9h2IvW~!i93pUzutCnGA&*@ zotoOp#J_76qh&%*nzuWZCjNJk^MOZ`eBcXIzLq6VbuG%h6H62RtVeN(C;5!rh6z3R zyT88ZBXporVsZO&M04PZ>R(hII9#$W5`BWU&KHb3dczOCaWF zS&*$=G%3&F&Oz>-HJM_KFiinLnY)E`o)Rj3dj^X_`TYNF4QTvT6!nrJ$r&wL{5%_V zFHW%#?IBdaIXid8ctFZ~IY;W%C?~lJL0;ZM_O4B^|KLIQj`Spd&Ql8bQ56 z>QOgjBndCH_;xRo5=KwwfdD<4wo}48o@>-UZyu8L)Ae(JoU7kHQudKP$%V zmxIJb+Kx2hyFM+jdL&s>dIc+IJYbg|YST6D9{#tq1?x?%Xw;3ZIA)uUrA3B(r#gT- zR|V3d+x9pwIfr9bn$%ITTY7(Hcj@w;E6JHZgHF!1!Ra>c%Km;7r5>yC=xDR3Fpc-u z()6hK8t2x$mJ_SWbm^Fyoj6DKD-N3k;pAg~D)Qnr8~1I(v#%fd9pW9{jd~}|9fN2u`U=E=ublJh1iJ%_HaO=INxP}yXP`3rj=U&|3$e{%YbZ*t+${adb?TL zD>-!29!v58e4i7mNB`vVn6|1Ht(9FRqJZBQ6243HQmfhPzCjebaxXuN@XzVFJT*4S zBY9gqzbBasg93Hwj9oNRWd35%>Cq%JndbL2x~v>l zV-!(xbOgOP-Uh|=5`0k3M5$UKjF}AOd|E9w8}%V~1v~L=KRG@xdkA`M&3lbDv{u)S zN`}?J`?n5VEk6NWkCRAi*Q2o}gUPt|G@BlInYsMCh4=ks$m!1|X<~i=ZCqQ+4j;RT zrN3`VH}U;}jpt!pP~n{(%MNka_H>l;v*m+xD!i}VQ?%zZ;0eZ#Vwn3`tbW%EK4->| zszo3TQn5Q5imsYhNY38EuY6~^TE#OR54b~T_I5H_wUUnf zDiVLTcp{~-7eyIn;_i-2oRf8@Ld7TWTBSqFhrY#)m2*j*pOGHLn9#ldF$gx8VBCxa zP)!OYZ66=ruaw~(*2~bjB1w1+)^~c+vA#E$ zPrr1Wl>G#abI0-V&~RFNO_3%vzhsMT!gA0h!;P?C%gIN)#3f zYUX>{P1k+6*R@4(__h^o&3d%ldo3HpJEBFm+{uu4e_Kylkbc*GcI@L=y7!xjh9&1= 
zZnH_^KhKvX6igxUZXy0P^D&uj57MvIgtpd9%vsuAk-kQa(6t0xHh#dQ!2$H`Jf9OD zGNC8R_a&EJ#v^`LIITZ_2q!k>ple?tX6X*Wx|iJb;Qs-dqwb)S?-4Kmx`6j1ROxum zHu1>ZKGZy-o0$AbU>oh*@O@MO=GdE)@e5O$q{aIJ4*f{?NiniawxS2`7H3_GuF5Sc7_ zrbW_cKe6U*EYDS@A?COhbqz|z7Yh~q-QW&A&UXE<&>CasK4;?G!+6be-lm@&=vUKb zbQPXvMpGBFtk=)QQ(D8T3~2J zCjZ6JA@2WiTNnqw%xp|`;tcEv2^zS2JAZEsEzRspt#3ZT+1mlMT!cq_tFTbRmrmN` zpvZ7FWjQ|Md9?}@G-=XWe>uwIdDZj2RqW?u9kS!Ts(>fU@wI9N{ryjl97nfcae9bg z(l!I?-6v5;m;rhCEW!^L9~S82FD_mp$J_$OGlPV!D9gDoskgp>trsk5{V2v9RjZj{ za4_BHOoLhco^fsGFJ`amM>WNnV&~dQ&I62MSBJ}C(v=A6`)V`pa#rxd@0}>B%wq3Y zE{rD>NN>v;P+`C@81~~?kbk3T`oLzmWonSzbsy?*^M|$m1e#i0gNs8}!M(K>^*Ppf zqUJzvJ$@l-#YG(9_x45ai*Q`+Js$6l5f5E4q5rOE^WUFRmjCM}^qsrGvBa3Xn;oda zx)~P-51_jH2e6Yk=dZ?$I?wW+#lJ8*cJw9_-+zFkrV{O5ttJ@uvtXYdEuzNqyLfPJ zt6&&pM?)uW#jZOCu_)}dcquv+6SB-`dC#7-bGVh5>DG-tjUFVvm|llpzNYBQ`62O1 zVf0{wf!H4Z2t&`vP{GQjf?19{`SjBzyLF4i+(#>@@76D{efbTgdD&u5PRdi{qKGnFqu(c_pl*lK0)5G9>Xy87#Xh z0S9i+#N5Xw&@t9vHuH8$n-ooi&wR!g=4T>RmA!-8t8A&YqZVN{!)TX!Hffr!pdOk3 zi3|D_%#8x zn`Y4>vf?h&Mt1S|O)TE?1|50UG$btrsecDaV-|#xj>OpA|A-8hYwJ?va}BC&Qbyd* ziI^kz6v<9a`1GeYeX>&^2V+Mz`mH|Acbv&{YWnn7aXnsH6~S0rjcWh#Ib4;R7+JWS z=SNH_^2jgrY>vU=iv^gk_R9T=hZ~)?+m6tGE9nOJW`(`xo%yE&2TNj)R%hnceN75|9ioLWqhzG*SLJbO#Xd%=!u(M?eOk1md$!IqkongD;oAV(m<~kHu-fa^ePn5M`IwZFgyrBgS+0g7Kc(oJD4cNBE@P#|xe9Ja&gJ`z)&^F5ZU z_;KMPepYBha!Wn3jZ&nGbqWN%)W1xzc0cZI=}S+~kHA2a08;atNmojgNU=Ac4Zqtj z9r)}wtQKw&^KUOf+AZD*KdK5>{^#=TX2IV!jZ7Q@aK+c0DfXC3H?Nt9-8d&^=FSq~ zzv(5MXKq7><>cw<8B1EcJCpj>gwm6mePZ&Lwb<<-M`L$QhuXD#{1@azzy5TgLsge1 zwX0A+C(cRHnSrHI)>M6J4=Nsw#&5F?IGQ<<9`^sq)le3bH5%F9&_)mZ3WWX z?z53g|6~1Kw+Kz+hGT90U-S+(qUclwsCO>I>>IB!AnGo99yX?2m39=@?qy#S^l9(x z2-+5CL7g%gNFUAf=lo83&vHE~Qu~wB;$`$yc^Fv^e1n&Fc!o0P5N>7Nmn=>4q6Xdr zTvwGy2RE$2@5Z;V^p~X(n!1#B>j55L?St98OMZ&=rCZ6d^d^53v)R^32ft=eshP`b)%60}-LdU|AIUl>l85cTqnZ+|hO<(%N zz1uEBKV$EHMN$a06_%HKQ}0_YZ0xus)~W0RAE@9(T`)~_y^izK&B%1O9GU3pLe+K< z5_gT{cQd~Ka^`*4;@|LmGKx-Blpwo!7=53`fA`*PgyC(F7r&Qo`Memug;K0+y@0^S 
z!^vn`8{}r+#>x${6gl%D3Lc)sO`}f1ue$>s@;xNHDU`5JtN}CmzKZb-ul#Tu+Rw`1 z_i+G~U%HO+pe=Y-!?{>D>?l|}n0^$^Wm%Owk)EVU3%UF7h~Hq^@*$fks&aSE4+;Ki z4yMC1w(;I%Gi!-DC{8J#i#6O26u3u|g6v*W*gppN5a|`v`<$MgEN*<=GP~X+xQ+rzj&$hng4{{4}LQ_&irHmw&Ky&X{qyn1+my z6iROl!80`(=5X=4MBc8uxV-%fGE>`ySl3d{CLB(hmY=ccqbXHv-bkx`Ceh=qC8F%& zD9k?b6L4Nd*?-DVcZ$(0ey+8YQ6{seJ!!PX9P$sc<-@6g}1COTCqN^2m$) zmtJHEdVVbPLmbwR5|CP9LXQWR!Q!+5&U7R(xx04M{B{tM2KOXW-s9A*JI&5ShvUtN z4|t#{f1cISmwX3ol9(PmLjmog3IScOzWeJPv1E zJn`;vC>=VPg}@KG6n2k)O$U^)yT_7|o{=H7{Ax?b>MSwgmI6g?^rb#|e1EjTg#x38 z)2xESu<@~`Z$4+?G$s@->#t$+qAj=`WkM6B-AQ(5H3DjyFkx*g&W$a^ICT{KRl+_@x7v(*HAZyBr9bsaok)WluCPHD zc4I+nC#1P;?juw)gw@V5^yFbSE;Op5_^~1NYut_Gnf>uw_O@7edJ|-}>yeSo3vm-r)_3gJg zGj#wRH&CQJH*4;b%%oWpqsXf~MRe%30K=NTVQ&ha z6Y_n9apUKLn!6Ck&sY`fs@XlA1t>h*fbV~+aGq!T?!Nws=@n#JCfeSlZe|Bj*&_Kq0}=E&dgj|XGECk z_9x3fN3e1b&!fok_t+g5lIgjNDfdxf{o}UdT#GVyhG?<1=5C}=vWwZrl_SDD7?~e< z*6nmYR4%$Bclcp(i_#QK&EPEgh3X_ID~gXk_n>j_y~WR~ui;?l4!nODLeFA}GS2vd6Yl!{>>`rY68dfM9;JjzEux58}_%t5E_G5Z< zJKP2@eg)FFnbSydr7Bt3JVNBBo6@%vU!wTxZc+R75^PT5TigZ*5DA5ilO4}+)Uj2 z+J%)7>ri5F0%t}HBm2m|yj!bGdzQ=6nlW>!M-`t_Z+D~xYtnFMjvI`PQy`b;MPbtJ z_-!+VJ#}1-PX7knGVeu4g0EpfsXR_s{bO-afr8coEex^vjXEO(8u+}L&28O?Vu$C@ znOuW!KlMnTy+mU|54dFZr~i&mp%>Q&@qJP%Tm~PU0OJ&5D$ zK_-P7ZjNQ;Gt_C{iMyDasKwGl+;Qxj6K%W-n1SybzG4`^>t&nr^WH04dh*|Ep&>UN zEvwzA&71Q|>_?%$e;IQj@Gh1e~10xpe z(d8|EB->^~R~~L)^_=@Y*f$Ars@8OGX*Plebi>acN5t{+5pWqTM>A7m;b{Nv zls7F-oZ)pAudfv%t1OIGY_q4!!J<%+`-aaH<)|xf9;>tIPWxB9$I=s>LWgTC^)|1; z9QiMhTUH`vsvCsWe6Btw@G3@nX>vbRA4pb5(p0(WG<4}f} z&@vzO5__`Pua8=fAkvz)ALb`kQTN|7P$`aKTTdFYZB2&a@$o-#*Zra}&-M~@`wbzj zn?G^$mpL&pg`STJq1(fQ#Y3D=+jdx)_76;=YmFlIzLd}p_pi`-@eBWA6{#~Tnu7P} zLV930x&A0XbhPuxD~Ky3bF}j}0Q--$N*G%{=;atuJTR&&P_A zJ-ED+@6Qv<&`P>A9W&@5&q%cIlqDao8&zMw6S1!gg zRP;11W=5%wVqEf9>0KdD8npZh4oYPtcebBJ)ocTjJ?JU%>KM#g`28uBJKgW{Jbs={ z5~~Xcq-V(wS;aVW#Gaprtvb^Y%Ng_AHXMR1pDmdTmnW^4E7-CLoD1}Kll0AKd(w&Y z|9^k=<*!po?%)mVRduJ)#qQLPXZz*+E$E+L30&1AXn%VTP0gt=vNWKHb7iQ!=p-r> 
zjzU-QGDdgfy_BAUaLvb!@@ANj;f~eJXU%=Aw>!f+>~%?Rj|WkYTgYnDruP~|Vr&^Y4XSl4Q*o6=CW|&K)l}g>L_*K-pN6nxApT;<>RH zDqA5YuHffIC4RRuRi$1}ZV9QUw8_cKO&qni5!8MF`dvOWZXb7pZ!;5Ty?zI|pl;N0 zQ)FeAk3Z;rM!M`bKhzvSTWb2FM0DUZL;Cerh+ zC~|Y}P3J>CqPMqHqW`lUXD1&JrH3*BzM~JG`;QH~wm=e|m&Efs8_~MkjdvL@u+@9J z*e7Qr@qZ4p7{?50mh&UnC>T+}AvxOJ+ky(GZY7&vGpWb1U@<8o32X1Xgt~4d-)U_} z(;z3xdZ|LOv1$|=p-j>n)2MHqE1vflLF!!IyAe!=4&K zOz6#vCRUs`kY1@Rq}4`dWNbDL@+bG9Z?HDCmPX)d)z=ESoyk-&bQGPD=UMjPE$F6p z0IMRE!~yZ+iTyi(w`-Tsg4z2KTy+^M6=bONgepm2ok!SmZ?>FgSX`r3>EzziaCm5f zUTtqt7phOrqm+f=#SZAXGn~5a$dbXtEx2_#mbIK4DP{%k5Mt)r;@0*oSeAK7Ds7J9 z2j{OhwXa}z3-j3NWxW5N`xAA;hts>8uchTtzI@gaB6jK@hSGVF-T%Iysp@!9?V=1! z4je{nhI0P%4OeVBn~5f&Rr>an61}YjQg$iOf}viN&s`|d1xB>9V>Efs3c{gYk<^iX z21>VcvEgt9KEjW{AT2z}`Fa3y-MQC>XQjdxm}Sc&?# z|F@4+!qDF%D67Vs4&Ut%{<)v$+&O;tdlx2nS!qy^t}4BfFtK9aa&i&*+5`{<|SRJRTz-y@w{x+6KQ%MvwmznR2Ow$zR&Q)@sUOAOF5S z=kFM2wbRV0=>V+4lxg#bv-oeP2kmejMEz8HVFvFGydE}}OkVPHY~5mf=g*%_AqG^! zcU?=1CenlBDYWYF7?OMZ4}P-_qLt_4Oyn;MH8qo{Xx3>Y_%EXV++%pfpc((E^0R#Q zUv#ue;XJ>I6$*y5w2Zs_dTpl-2mG4?> z`}L$9M-GV_g5L3G-4`L;@+lk9`vq>k@`McM!TlV_S#AE;;a{y!i*Xs>ALe63|H0(A z!IJ)}In(z;Zi1|07EW)sple5mQbqP83Qo^tD^E8fJ|cz5%rvLi>PsMnOq@OON36Dr zL^gLdm!xP@!pagMd;VwCtAvW{RCveCRf2u>qsZWoJINeq7nJPz{-Q;TYRr06h_5(1 zzEXo6#&;LHJI2x1N7teL{TULi^pM)?mY_Xn81e!iWAMRl^oaAXx+TTZ;pjx#+`StG z*p~3#)dbOKwSS}!KNyv@!8526PF581H5KziQ12ery+WKlRBn&Ygb=U%9aEB#jq z-<)buc0-f|wsCh3KPNBD$i?HE4pf()L5XV<>4A2ssLxqKE$=z&zuzwU$e-QrdW_V@ z7r<(c7CDBs;K%s&B)2dC_skS&UiC_hHr2(f%Yxcn-(soORHoLl1A}b;aURD$3^g4>DpLp2zynk1 z@Uh`^dD$8&3^StP8G-2ZJBq4_hIDcJVAO@VlR-fO?LX;H`i5T+b~F-QGqQM(L|wd5 z?Mc0LFW_VEI7(vW_`|y*6A#E!9`C=3$bsz42Y^6)Qp4ml{^#XdbF z8b5zwd0NdZ7yc~7CEH-oj|YNHABa_8!cJsjNl%3an-*!b$2~0hLx(&p9{9+X!?-d8+8q>OWv?; zyc4W&Ybffpx3w3C_IU-%_txSBuV&PQ zD&zav@pM<0=Shr*iaIkcfVZp3$Fo?-m+4Eg*Y#=B(IC-z|1wILz&++y{~&9TJzC^J z>qRe2{Z@|ql}co=d>WPyoKBhi8U6E(4Bc*g1E2q^=sf(fdfzxs$P7`T$jlaslJT6+ zrIe8oDybADMM+yjDWpM4q$ouh4W;3mhUc6lEk&t>RMMoqi0F6!{sS-1bIx;L_x1U_ z-_xG+a?HvC%iqtR8m4vVV7Q5OA8^q|s_mr;8n&RFpS 
zyBQ;Dz{2y;=vkz3FSl!qS0@tJ`%(WnmB z{WWQ+{yiv4EhqB^FZNDVr+0(m;NUU?XJ(H_oDXA@_84LHI8%7s&V|$YpP1UKPm3<@ zz$d>^qUL)usHIzri`r<8dIK|(&-{u4uYTzJmxfLWC3@^}6tmBmkQo)E5)CD9#bK`!No(l@GRb<1%-UoOPpCwnt%-1?c?$Ku zEk^mJB@~c!hB3M?;aQ0s9o#;EvJ0v)=pTUj{Iox;=n)m(^j3MU=qO&X^%G0McA`gTfC_DU@@ zc1A(_l!DkwRfB#?&Ot_#0` zmN()1kb$(}2+Ko-mtlMI5ZdaZOIgv^`L%L^E*5!tSkGqS-T{^Te~A&SPj`qrW>x{~ zh?CrBo8gqrr^Cx~C%h}At|>2y$LfPxuIHN=G~H1zLA@i*qNu9Z+F(NtmC;ZZ}zk@j?fs<&8l12@f+m0WDnA+N6?^|yXk6T96j#b zCCo2!!+}c*G;`7ccwc7u#|m${-gO;^JPk=&x*iqn3FNYWE-Yscr;8ck$jg_*E1S{i zWAjP#1vR`@vBL6A88CnI8=@};RHmMeKpz!O?Y$Sbee!8;>KzUI-lR=lZnCsQ<1?o` zEf;oa>>e?Ixkg(VoAsd%t!|TtS`+IhuUtrp9~`MVV-?a?T*997!zop58$ReVKD>Ss zNsVAEv&EmVbVx1|)?J48p|8C9`x#VbaRK{ZEM(tb_RO$u!Iuw=RiV~{@g2927yFTW zG+&!?Y6j4hj#M(-(8_(P_=u5{*{ssOuOhC}2`VwO$aiHwDoxyuG1?iN$0;4Usrs4U z)2#~s*M)G-TEh>VybJn|htkV?$(;P9DRl7HM6ye4N5kiFWb81JPjLyPLD^P9Ps=fE znf#tBF)tHM`aXeHWgUU@SfY6gBq{GAhvBbX;rB1dC0L#@dUO3y(DfIx2>~?KPl_(P zXpxevH2*7P4OV-H(BF_mD9+e$dSUF=M>&f^rF0A*ncbNRg2lVS~9k;o;gjOZ%MKjgXnO=8m_<09X!r@%#AHFCu7}WtkT{ARiAG{m~<-Q1Qm*A z+4#h|QvPY>J3KoxN~qAOLPV?!F134*`*<5_=q}{vjJuC0ZRW5&`%9EEc?ivEW{yy0 zb-_k`0|ozlipU$UF<_${Mns=yoh zixxi}Py(&dlj#0{Ol-3pMZ3~cNaIgDIph@x*5#pi*-wfzhU_HS;B6RpPDIxRGnS*K zF5NxdhAYMY4|GjwR9hM59dUw{`%XVYwr> z#~Uas)rL?ymw77+@pqRd-HUX<^&^hd8n%I!-ePlN{wplyXCSqp4DW0m1<}HZRNHYC z8mpGGJWCDM>Q*B2rUKpDq(s?~g;vtW79fioVgB}Ani5s!}XKoZ-RuCB;;p51g6D^?rQxMUr!Yjz-~ z{&E_56v)xuh5B^*@DSc?%}hG5V6otH^#*HtW^>)gJUE%i>Evz}hxi~zl56^g-Lf^@ zkY_1K8$XCESfNGX`Q}hzUF8@4ZuH~+cU;$(qmB+=iY=NB6Ez>2@%5KUmXtL z9t8hI0gRz7P1yAuW5>_L&G=fh57wqZdijFkm40LqVM8TuT39#w8Vc)pF7~-Ph0PyB zx8`;son;UoOIG5zMhz||X_N3%l@529lmCnmt~#w04h>r5d_+wkMaENSv-joF8peg! 
zWicc)NU$=&_CGvzt zts}7JGrJpbFT}68xo8dyqyYbCu&>afM_U?Tki@*AKb#;pYAADCZwB|n1*&DvnB)~k zTjC7Srl^bWiDghr`i}M@eVW^vg_`KA;sX<>(=-on?wyGjW1?zP-4Mo<^?5F8n#?*C z;s%U)e-kmm2Bfs&C%%RFaSqD{(FUh+bfkp2UYrt;xcw3i4|$2Ope=At?BcZ)!>G1f zfmSHJz-#FVkWq<(Oj`;!?w2Di9k&PBhS4;@tqOZm+d$2oSouMbMpooNE4Yhgiwr2G z&W5fP+8~qpiKGwfke)$4a-N*8_!=LLpaq6xU-S`od(Xpe#uIL*pdobT7jgHN_>0xv{t#=ZXX_;IjhruXvK#+aKs@U>+>x9FcYsVe{Y-DAe#pX4o#c zjR~MP?0ML_L5&o9fM zD|(QnU?aYu??&NID!I`OO^CP`jCyS)s#o6$so!g1EIC@3d?*l~$_=SIPmY~aKJ$(% zq$$X3{(iW2|*y!0P+F;y?A8nRG z{m*zTdNrIbyt6`5;AARD+=2^%E2%X*4Th1K+_@7Wbb9;?{+s)AC_8v@j}Tk>q{sD>J2_;Vmw$dfQOCZmTt9Av@!&}42mRqrAGf2lxNPRjh@~?{bu4@Q z3(0Y9xRfqIxn=3-OkwwcLyReLodNwYSWoH7%eZh2WAbHpVIy~Ci$3M;#%xzZN>6

E`A>seQw}y3QWG>*%j~)z< zc#kpcuJ!bs$6V2|eBQpphQe;O@+rs78RM`XT?v~}bx)rVXhzGHmLe!~1of%xL!-i0 z^t#$oUY|W3ikU*MM}82Mq%4NiNhOkr_oGG!C$bv)lB+cqWAeCZ2p-B@KjC|X0I7f*zLf(j*Zf)hq{#>C*~!>^-P4Onp8ANTam1;J+AoqlZAUO%qv$?I@@2~pP|or%89tf zF)#Sc!R`27t|4xodIygE*87_G&D-ZC43U%2QB(u{B2X8SiO!R43mukYV{VqDM5}jP;hg#_pZlq*BrXP3# zRdp>g+|`WYt>e*cwTatk4E zg8Zpr8{^(EU&P`SY9wLji(UJ`Nj8Jomu~C~+Hg&k z%KzJgo!cu#6SWBAS@wGMFe&;fUCg@B-?>RI)C9GX3!<0<1Nf}^FZk52mMaZP!2#!X zEYV%dy`OuG`*~_IrO3X+g*WW)f4+c|W!y{djUt`~iqQX^2$4tUq2HS+^kmcn7+=+- zuXB5lJxI)*IG+xkZdd-}Qb)3HR)O8bw^(#1m~qFWJ82>k;nPvSutC z?SaqpG%5YCGv#aF!Prc7vTi;Dr|_w0TCYcKjRtg}Gms8#-O6n*&Vb*bUyQ}##apEf zuDG@)ih>S5!J5EOOq=-!r}LLXSvM2&mlz2`-d6l#=cc=zjA!unG2fEPdc!;C^AZ6P zv~y+}e5BkN7iuCkzH}6R@A!(yX;QQ@BZ_;rq7Z=}y09~@g_kH;PoXsuw7ZaHLseY3 zU9IM{MbLs&=~hhf(xsu#XTcy^gZyTOk}2bV>ULJ3*zkPC4~-7AT%0AG`@lM>R)Z)+ zsXxU2C&b?zlkm58F7A1)VtK?y?zQMU=icy+AO4BQdLLab_3v%qj0L5S=|^L3n2_$9 zIP$fdL}M>I2&YBkM#)cNkzQ| z_a?gIq-+WNZ!=Ct!3%Ev6-O>x{xr@Qe8a|TQe;zl4;8sjxU`$IsCxfJQMi!?*8h73 z&4kZb7Zbo$`|ZMon_6^r`3)rRV2o?FM~stP!>RmIC+)M#D4a1^fBhK&yY%f)Jz_-s zR&zM4$P)R?*-f2&#Ll=axV3aPIx_OGD{!JFU=0=coIw3f0TOZjDV_DL za@xSyPGj)q>2{x_Z&&`p%eIN>V+dB$mt3Br8wE%#Oya2a$aJSC$|C=!f5)EFYii zPHJjxm|4<~-ZFRG6@Csz`1sRwxhFWf=nO1b=BGA;-9h9zvHQvwxYSgOi)}fqC3hZY z3KVH?(RCq%%~b9a0tNFz1&rHq3x4qmaFaDAX@?P%qgRK22UTeLS9UkWJ}db$eX?SG zhz+5&e8@v5{^a&mn7|l$OFj+cTcp-fga2A_x-hpPl{@3i9 z+5NYdDmj0YrEaeX;ac%eM2O}HBTSy)%}X!5XW3^3-is)+lCKGPh~zUG)V@LB_arKl zX@)X2&UF!v`EOujUoTRP-lDu!30FP0@ahKo__Fd1Yux z)T!L?ChDXUsAo+>e37FC)YmB;Dj$dHLt z6rC9DNk2X2;N665cs(~G$7{0@KP;H+ns-szXBP@$_pGa0;_>?OF+?BI5;ShH{?E;O znA{Ub#mlp?WYJRuxyg}X^hfyhFM(@?h}#q`LmE@nsZc$eZ0aXqmtHgMS?~P);YJs? 
zhyOT(OVO0SpY1b`rK9=qX->k+oQAA?$Y1%m2}X*!%z>3AmR@lWkLDWC29Mt^F|ocB zUNV-#A`~cni!-fQ&@C==^`KkpRE2=c2hjOX1#eHy;5LsRPg%@Cvu3R;C9^K6gXc`7 zC`ZDu{z8TA9M%V(Zx8p@PvG7MQvT|%P<8Kt-n+xR`|bG<+03QbX)hU@r~s2K>T&qS zCe|f!B!{nUSRMWaPYe`s$@UUt>J{maYLPHqR)#da!-YP+kE@yb0+Vw_L7lOl2D-C% zjb=55Sq~+x7{&};n}ve62Bg}gNVQ!~RH>)Nf0J5<(0(K6z^L&wbo64D1+*7wCRN~C z({gVA2Xl(;IK?;%YhhU>aV@^q8sc7QIuxKr>NVa1I{VOBzC#F2xQEwM6W}s+G6_+P zAzt&BS4g^#T=T)SC8$SyX1^MBPgSB7{rd|)q*haiYa60}wqpOhk%+zF$Co&mx8^(N*#V!R;{VP0%EwGh*PlcoEMs*e)0w=k?4oA9M5>7N z5VShB!me@*)n3?5*5&tMnLUX_d6%IiIgnNqy@tHg4w~_0HfpOSs3$87BLjxOY@Z@R z90SN=_a{zzu^CQOFg}XOXLOwy&2r)AkXO{g-FV|6x@o(NBvwj7QQwe^Y8vo!bG+z% z^8s|MGo|-`neXS2h=MhgDX5n@#@J_7D7&8Qhl$9=-3;@u9f66t5y{E*aD~q%QtYK| z^umkHW}$7E5UdY1i)_ptVl3>Q?nd7CZy{cF9o1RiVp*mtn|sWv65*qenFomRX- zz8qf6fKN_G(=PyO_5gI!qv(?+bGGiaA)BA8xPRK3T=)PlGFdg0iXT`~#?({1+zUtA+p$1U>^hAm z-U#Qdw{zF;&!Q&%0x0}&pzX&Q|LwOT-rdT?I@2g#Yl0fhFEzzQ=0H4oVmvt-cHx6g z50)?Rq=si^m{{OP@13gPzwIa_p7bLv?{%nhnaDVew~^m*AJ0tYa1h@`L9{wWZn+|? zTt1k#PS>G@uk*OFX=2P=`i-N{`joZRgo^aOU~sqw<$Ih#?($oZV@@N9Hfj2uZbZ$a z*11HUW-dWHRr*m?&3pd_^wcJ>vqdY0yW4ZvuSIU%$>@pR4_DV8!k-K~%#PKe&4G$^ zIe(6@ZOJ2)Wy}}y7rw!chlCLWf+>UT>*Y?V3%T#^z{6n}bs0+wTCCSp^o%*k&Ugy1 z5;joyEat-s{D&ebLzGv69?DI?sOowYER~~kDI*c}B!(u0ucWd%Ioh7vf+^edg$3;F za875g;C`rp3mq_pBt00HCSQ?L&5y$MnHey7w-B6%3D^5uf}fs!Nu)e=0Hp;MiI1@z zO6XEsy0eh+FM`I9aO?n`^xRCYN=bs@rAbg?EYDrLb7AUPj#D35FW~bdWZqU{jKB&M z6>Xrxo^W`2Dbja~RX8_jDF*y=guQ+k%i~YL0|ml|(S}&SX^MAcq5m&(R_KDb{@c}c;D{z-_Xr50~rd!k1(a81^*EWQbL)N{f;FP=c2BPe5u#DF3s2Jz1<7LOJuFp=#!GIGsucYnBVH*#R^qI0rYSmy-JZ zvsk$CDfR}ck<|e;3M>|*`OhQn;Ok+e%$d-km2r5StAwX%!zj}4Df)Ki^Cus~g70*r zpN!Ra{zN3av(|7+BS_vUX;RzBs2Hy^khzV)nt|>@*;lWY5WjTj5eDAOsJQ$qZ;!@RG1AW zmER{|SC9tDm&W8NvZlJL%)^>*0RM5d%yqgv64Ad4)2$s=gEKhW5Z?XdZ>YL^kg;WdRt=N5O3zu4y4yoY5bb- z&lvw{AlWL#i@1YIwCqDS)Y!8!`ffa}<38e%r~y4o=8ATYJHS4Nsc_6Xgwrb+54A%V zy$9#eVBJMj`A&uwzrKy^Sd$7L#_>Folp^@flf@7_<_mh-z~=M-I(EYiXkSMrV-&EY zNm*oh!IOqXKj1eVV@}&!h2lLdXHi{YK|l7f-7@2i-B_AJgJ;d76Tzv%XXYcFk<)_M 
zKS%Mko$YZmrqXQ}DcasAL%*73NVjbbRd%M~{TS8@W@nwVLGpNWBozO3O`@44)!e;J zr}zN392~b`xlDaM>U)rYUB-vFyzmNc!fbgi*kKM%B)@~0aZBX4?&OBn?8Oa*Hta0V z#jQeJ#=vA=AZJs=8jWCw`}vd<%=!mDnYbEU4F_#Wk{B8ThSZ=IujS;^q)Ri6zeCkH z4L^@=LvPP}Ug1j^?H93U_xmzh*u^p^F~3lyu0*{oZ#*Ki5v|Q%Id#^b2@BCA*WS(K zBQJ0gj?AOjC@}wZ89(3N7ne<)$lJ9W&f(iYTTXI!w==eJe=Yu=Q4yDN=MWk<{u8IJ zTaS;kENR-<7u?$Tc{FDF6!OdJ!%ia=y6-)Re^%x}){;ql%gt?E-SJ1<48ajH{{>Qo z#w|2TT2l8?MY2yCfg6Jf4L?pW-={hKc+4}FW;aTjm`hFd7M8uTpwKW$!FKl!ED**} z!uj*4`^a_{qC&_!L}6swXxhKDkvZV%8Hem9Hf_I$o`KEaOw0JF0M;=Myw2ZP+by~x z^#Btc0Ivlsb6{>k1NSl}l8F7jGN$C*svI~JThXTy2YS3HkoLT{L`&Tj7%;xULEA}W z8#7?gDBY))bXXkxDYd=-WU9y8Stx%``48GvDzNO&U8&8FuU-90c4qh#` ze6gxGf9daRd{ny(v6y+E?n>b8v&A&*<03K>t5B?ODZH zDECznH|*3@ayQ%zXPLFsm^B&_L*>M!5o)|rObdTk=`~#L9pOzUKSWiGIh|Vm8#bMC z6xp6g`C4IgRw+T46`zi{tpd!sJ+zg(h$YXb(73)&EYI@?lbZUoT-at3C1vBSJ5iQw z1SvN$Ea*gp&R*Yqd%`jJ%5vPR05t(>cn%Js&&Q9|7k?4I0* z^d+n(V(~}h(Vl?Jb8I)e{~ENS2GTHgCX{*`N=*xF=}OcBN}Xd$bs0X`^7;UtkMBTp z-#qMWHm5M@HDuVNN!J=aV%A?rSY~d6`Wp!$;gT11`;=i=-(tEp59RMIJNNS(UvDiB?n8bc z6^y7CmmM35T@(J}=3Vy3n&Lp(P?3RopY=$SaUM-$j&pN-_b{LFTz-!^^Dz9E4fhrO z=t|0L(sFu?f!@yagyr}v@9%~7A|1AhI)N9(cIX>^6>2sPFts(NuJ?bTa_1_3x?RNF zuJ2IT!Q;dkRl&Q)k=8yRM&pvkb2k-y28Y6ib2C7I+`bI8&u-F;-(AMA46g zO3k$heQroP&%WT@ofrIAmV?`8pesn`yvFGP=Fsk#LcWo~baG%9Z=wGkD~?Fhu=Z5( zopc2nrYlGLPK65kYgSSIe~gJgs|wpYy*TGTPpIE>A8T)NMQl^E9d$Y;9Z6(Fu zQJ`Ff7Q7m2&Q%Wof>G87h4~eo+_2`+v{^o#o4vb{J8fl;j}bGFGtd|gW%iuZrV!4n zG?FjQuSUk(z2a9XUomg^a5{cWj$A60XuI7m=H6LGC(Vk50dWbCxZQ%Fj=hMgJ>% z!$|U{BI~>`AGoPHRaI@E2cu)qcGZe3R2IRa#TK_i;!&DBjk7pViCE8ak7AuJeMV&-($<#%6QCy0M zg9Y8>Zi0g#I5-rAKK#+Ng5c(tf5E`VIrrRiUz&y4nc{+#*h*}xvJO^!HEFA<%D8fE zRjK=%zPDbg`(+*H(_Ssm4{b8iToMZ0dt7rk9xY?(F)HkvRBV<{6J7|@i;6r?ij)nH%3~tFS7^O205m}P? 
z1-;~*U;16hGk4f8@y`bS2Y*iP(#^pzgmI6X3-)j9Jv5>}!S06nnf)?ZhF^on=>0NPm>82|tP literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/data_0/set.000/force.npy b/source/tests/pd/model/water/data/data_0/set.000/force.npy new file mode 100644 index 0000000000000000000000000000000000000000..10b2ab83a233e3e9cd9d3ce05cda586295edadf4 GIT binary patch literal 184448 zcmbT7_g~Kc_s3h3jG_`H?L?uJk*?P{At58mCK94jR%A;<#<$PE@V$P!UO&BV*Y$dx=i}V(=OoUUGkxX)dAZGU+Xt;!xpDc1L1PUD zNxWK<%hqgM*?E2A>SgOzc78V>V`XVDa`dRdmIgZw z{(m1;D|>zH?I(dYakw}oqnxVJyNilNVHkYdiF=&C2jaQwV$6v*u%mwl2lm~~>51zAAHif}gxIU2K|KAzlv58BlW&59AbHabjot6kTJIVr za-zw5_k+T>+1$iZGQWBTU$~8w{>zL5gVl=|{WJK!*=&B0^ftN`m&E1Q=U}9J9NTGBl9x0M{pugltwa+z z+cDZff7eOAqcw#WPv67i0?bgm^f;RAo&o*N-)4D7W8AnypI5D%!t=*1hNLy0c)9Eu z9XOG|m;V)tfh#-^4|I{$U5er2Ltr<78j=)Oo`%Y~mNMJUKiRhE8CA)OdGyju=u?p)8p+P1?Z09Gw>P5ue`Be6Py{LU z-oOjy76@~#N5hUg#^{qCOP6O3;NG~7N2k}KR>f#EzWtE$_ooX+$AU;Pc=I6fOCmd^?EKWb;zYhj(6bB z+y~HHd`N8kaS}8ydV*OsiB6+m(JAXO@H*EWBaFLKtNSwi{$L?rx$g>ThioxGQ4BpA>p7K_D%asmM8yW?z?j&8*NN04Ibh3#%O6$crs=^ zGn0LswGlUUQ^OQb;N{&Q_yRY ztXB};YgY=><~$|)(Ec2`>;O!Q2}h%4XCTl?kIf2V;q<8z7_0V?JT88vrp8jM-I}CST#_J|ldp^%hooUgUpv3AjjW z8hctl#mJj;MW5F{VVS2dI~m>=wl&N|E7am7?e5~SSBBu7xE9(+>%fhd#JKw*j7B(-iszw+R;fbSJ3{_3ZvA|;?jate81=!RK*6tpneRGd$=R? 
z+a}EzrOBy&2ArHc55Gjr;`9}bW$PwRWU~jF7+JrNr_*te8+MJ;qGRya?`*W`UMFm~ z&*0j=L*YvMJQOTLq~Gr}P|*N+PIBypYs#NO%4j_lkJ!_OsbzxpDHB1Fizz)h5qi$A z0nNb%h<&U$wtq4F_*OHv?rJa@J!8S>+6?ym4lkjlfT>F+iO#Fyirf=`1PZ991u2Bht$u${$1_ijIEOu~Mt+jtk@pG_*=sOmzy z0+Pf$wHrcLuXuR&rdjxryMqlK2uC|@fyXmdW&Mg80RBCqwj_PDp5ukr7q1cZ^(Nt0 z!3yu01_()m^m$7BAzIt#68LtUmo7h#Gmrl{78PDiTL@#_5 zbBG%QBZP3L7+#XPiMlnXqiT0w_I&aVE6pRYzpVwFANfvftE?f4y8-`3#*ppNQa+QY z2fN#z!0)rMg137K$GjYYe_coN6Psh=#l9)n?d>gY`EJFTcAoSl&jj2*nd03Si^a47 zt>UJB?c$K$HA3DGtFl`*uY{wOqs8N;Z-t3yBk>G8#bd23WOG{$xO=7szn!(0T+i7+ zY};`fwRa;2xK5@p!$)9a@`*A#%vf&!4RGG0iaVb~^SfeKTHiR47a#gW4iyHNJ3bJ6 z3w}}bH<`eOnizJ|z#+%zJ88`K;}X+bq!%{^GXE;l8)@Z!UeMvAT$1Z6Ge9o?EN4N3e*inyQmhr)M_il%gk_4_(|#r zJ5BS4bit`p=Yhq}9L(K)7>Wn|lPob&m!y3-2kL41`1ZLH7T%4(AN%C+i=7JAcZsAC z*VOU&cNJDVxQ#t8PDBm&MzLV>ZRloZ$A>o`s4DPl z#nJe6_;McG3;3``DQ!~jh6COUykKJ*Xbip2{yhpX+wmgYj4Z_)kFC(&Gm$|%lO|Og zQ2A|%XsCXaoO?;=cYGYF=B>t@)5YSurxPetSU@-P<0RiJJA@(wJ)C+ym6g|~@`Vaz zaqqto*t_{7RUgWMoM#Na-!6#R82nGPh~iYDxyjN+o5X6$-lFD^EjiKiNM=}Lw(oNrW-HO#)s zmF5#!bN2$?ygHL3^=-Iv=ydcknu?SDy5KZ_Pp+#~=JBh?VC9x5p1GXFQMGzpc4weN zPKh5a?O{o!mA7#7$VVU`7knga-PjP)@J)*~Po0yO59OWI(YtQDOsoA2# zl6|;)Qat2M9>tb}P(&hd~IZM+M7q_?ZrQ`(BaMFS$ePw_vjJG z#YXDVk82g7{JFWT=N(grt@=3(dm1HPIgU6>)Z^&EH>5xFM&Qm{J`ghcD`~Azz^OIf z;&xj*IMdOCtH1nY)n&HeJT{z*Y-&KoAcHrS-l37L+i-jHJn_-r<&g7df9^+FlXqk3DQDD!I+`_7hSBZ)gqVr* z_{e%oZnBLQo`)OKg@iL?^m;uU(2T)3J4ceuczb@fBSNw)$y4kzR)tr0Gsf`M!@0tw zH|QTya!A;I35HENMI8?l#T?BDQi)rQr)|T)T5%e#dr~S1`{qF_KDLXE4?558y-(q> zUyhjds2wJo#PcG961%Y7EB8V?LB$olCX~uf3}?xu5N^DKC!~|g+}7} zKyTV^5m{!nDhIBt{wZw93*#%XrfmDiLwft&A7H<&G;D2(*k)l5YeF`V#hT%uU44*W z7aPHbjKMTnNeAz4`2lBX7%#Rs3A%EUNepo9#@wvOZy!uz$+sSy8}tZ_WCLkce={)scSbOKRV0+#{es1* zXT)d^Uo;$ikgVS?!L|-fwEgcFE$I7LQa@=1uE~m{$?kF3`5eLuV^g@d{v_TR76ET^ zBcQm?9}daf%U4xaV6TfAbog99K3{kmOD!FXXH8w7G-Ca-bc{kH&Kq}0?Dl;P#I1CO=c&CoUzjGCxEQcyq7L@| zSS|F}lF0e4{n5qS7kjwC!X5oL&NSZN4ExR%xdln>(dwD#i^d4Vl)wzr+%hol6UsH 
zdB1T;=1mHHewxb<{bG%mIxs9|87>T+&-HJV`0F1V6oYfgbD=AR_;^bVOTUY~=JvjOp8qS|YQ_NfukNEMj&^D$7db}-yT#ZIv;J=0n&OV0PA*#AXX#Cz7HXE1X#g4IXFzl=3;+;n6<|!3|*MENS z-}d_yVr&NMR1{g9-XR$O*bhb#T{)+P_?VT8L~dq=>kxnH zirD9CIp;1thdt$iU&=*7R|N$Otlz{N-+!UA8xMEhZ#_kRoPi#mx-hGofEO;@qlE9B z=jzZA?4=zB=Z1V1UPP|J$idyfY*h$4sN4rQIf@mV0S{gtCNUooj*p`4P*-*cH|6Sq zi`zK*z0<(1w*E8^KT^r1p1i@~G_O&;=W?hn) z`#3;=*K&?eD1@s~>g;~=Cl32{m8X94gqA@+xHhehD?jJ(n#+LeQ>&n=*FJhX<*V>z z{S_>auEG_+wqjqCg?zz$E)2C3c!|6dY_+`vznvcl+XgO!g@gMdCadr@^Kl$Mcqx3| zkp&Oj!f>lXJ2lB^bF53fxT9Z##BmIub=7fHTj|XH8~^L%Nr~vmE4ZP}kk9W-!s(6P zyeDuJAHH#f>`J>i6h-$#wHp?sO)`|JycS>QOyCJWcfgnQKd|({1Xdp4iicMX6VuM_ zpiSd1W0(6Ud1>`(a9U+1vuUXW{myd2&OR3xXNY;IO6wNz(pe4KFV5UtAOj$P?j1Fq^ z@#n@eP3Knf_^s~1O#@`#&f3$9EvgPz=o}6=E`l5S+d0?R1pBF_V0$C+Q1@UlvY!n_b!PvZeVRD+`*%qfmz6?EstE?2=)oKNE`n<*r)j_DMQKOL zbgmj&Dk^4OhROM19N^g#J?6U6;-(kSpb!Vvp3~?|4}b9e(o7Qr_n|m6SUfrPv*e?{ z2aRmGM=@98u)kd|Ec>Mo$@MpcjKTJxJjaP+*VWR2LFu%(d@()0r%rdy)zflyCEl$h z#{=wi!G3Ewnm-tfD!sDt;}YPD3e`tA2v!s(Sjn@UUnbx)&Ch{1$vXpNh_(?n8ie78!h~ zhJWw6W7@oKT)Y1`)qR`;56eFYp-1mS;u(T&AE%3f{+B`VtQAa*Yk^&^iSQ%(I5?;$ zNQN|w=D4r>Y3EjX)UX*yZ>#Q#FK4`kUUygFt9_NE6nF~;>h^OuVZXnwGORGo)>=I1f|@>(5z zv}2+B*mH3Gss_w2FJz(B(uqhwubIJ?)c%uu&WC6d~A1R*B7sXhrrsgMmA=G{jJ+jT9 z4F=2T_r(pIvcsLfT(`hjwd2xmD|+K)-2*T=GzMkPWB5qfIk?|_17=T-ht-bV#FU#) zVa&Y?9QE6YM>LW6A!w*j+`Aci#Pr0mt8!`B1_zdR&lCF3_ZQM$PsUphD`?N+8?^q& zZ}L}LC0s`Nj?cysjlbeRL>li^v?7f>2q4*CS$EF~8H`gP`#Bh4H{X*_SR7mrrxB@LYXkY z+vE?DdYT*d4nF{=dNfnOj+?Z7{xZH0xD+m@Ho)SmJE`Y7ZJ4>@tikyI!5=Vqo-VWq-hh>tn80D>8I%VycWK1=+Aqv9Hl6j%|<;0Vd8*(7!nXJ zU6t==uf08=Z0w?=c7aV1K zOMDc)N7$*N2zSB{L;cHZu;af(ad>irB=pBFUY|Lb!T%r7?~|atO%X4xxJ$G@RoL`x z8l@+Fp=Fs%u%>YyN~U)~qf17@*TQ??y~RhE`%95^HRZ*g1u67xcUSCF)FKY-871yn zZAm|TGa#+z1DTjApzbUQXN`|Qr`fMCX8u*UUI<+I;vb&a6GJ_!Wo&okEUQi5MF9a3 zaNnyBg)~2*xxLQ_-mfpxi4U9MT6zywTK5|wuP&$0&vU6-D~4xtSB#F8Qtp9Q^fD)( z0?l-IMMenP&fQ8wi;Br`*8%u=auK}WtIOd%`{A^l1<>UA9*@n^#1dU=!Dnnat}9fB z2j|X{VO9>GH9N^(rsjO~iVJ7W9L|bs$3oBc8N6D{llvC#5T74K;Y!7J8vZnoNPUlTouVF 
zU%;iUSFsiL(O=thFlNw2>CfKD_&c)*8auf{mdiO*^NOSJf2VP@>Iq&H@tY!EsG^;U z0p=MTr~2kysNX-V^Sw(E`Dd|YLk*7Y6Tk_xC$V|! z9?ZXYfCi8<58a@M>pu38wLSXEcg8S3IpBcN!%N8Pyf!2|4CJ+!^JxAeUl?Fmz}=JW z`0BV~9;2&^4Ug@xse8Q;xTl9h?sqBA^=!kGg@@T>=)KPO`Wn@A=>{vGE{DfQOQ3Py z9=J45BJ`SljE6otN@Yt4C(ZiH-*z^!V(bc*qY^maei_E^^+xv?C7ci(!yC2rS!V;AA3&NkW8(=wJ zkLpTi)8iL=@xO_k%qe~!wSUw^)#@&&R3ZbFn|(NK?hIM>%Bhs_u7bW%q1Y1F6$cz2 z#0AFBuq=KdZ+Wwn&95DY=lhh{XVgGi9rcx(Bc!x-i&BccU zBT(ncBHR*HASv4N2C4>o;QOS#kbLe8tWr8m4O{k8+(l~$RB#YJ9;$^4>Y8w{#E{qZ zHsIHbBk@48A8FPbuxFAcyY=bx%*OY4`{(o6J{_T|6j|@|eN29N8fUu4V^=3DiC&c% zj65mNIq7A%cV$f+ElKP>bENga_F!S3O zFo(`QZjgY{v=m!?+_@nkmJPe*(E#7$5V&I`@n=J-Y&JsM;cdcLM;l!E@ENH;=mFC` zkJ0C42~vk*1@yP*3f_G+uyFq#>Fo7#D0`to`y}0Y;`t`>Dencxd;do{yC>Kyj(IB8 z-FTHo6y(yR=X=S{b_nd9`dqZQ7msJBH^BGn8MMM9f@ZW=g7=jdBsbLnEOxaB&Rta5 zH<}?oQI7I^_T{cSGNJ42F#dUGpv+PAns7(lBR1P7veApNf~YzWU(9V5jt5@A%F8J1 zFVCj(UUJ<1Ljh#0ROKP7C!=FazF5BIJOv%DkO&WFK}%{VHVE%%-A`rSU~mk!jU#RFQSzk*6H8{z507?7>lLEfIv;K+|#w0n~?6;>9}n2ulY(Wq36@!QT1&1@X3 zAW5Q9R3csz{K+4j5GJq{R)3y) ztn-i-Z3#njXCotMJCmuc&vWi&(GJK~eYpVDGVbXbgodx1+4to@R@^leRR+E#&0&qOy48Xs zl3L5QuYW1Y{*fXMJD`U;HX+bvlme4mZ%gu)d?AII>o`_f4j-;M2RX~N`Owpgurq4C z^vwNUywXt-Qy-}E*m3%Zqrifh#5s4DZD&~L&t!R}Wi zM9G)gU$}P@{N6o-^28waQa#R}Zkj+goS}I$6iIVcH*Ov01I^Q}(Q2jLOz%vnJS~DZ zWYr7%f8&=stDrp+k}`-MQD2t*L zjhWbR)(U%f4Z?{Zp3*tT`fgOi-8Eclg4q%KNly*j@3^ z#Vu5CnM5}`xtMYH^WtNLYcTxCe=t?0LKr?bRlNAwoIS%1FI)8ADqLgW0R1I{ zFmu*1%=3(6xuOfa`Fb0@xG@(O?YxQeHuQv}*?lqdp$t|=SHYxChQF(K5kRep*ke?8 zJUsHQAlK@L4kkwsi?Z-hpIGj$x`^+2cH#W&w8=ju{HQ2@Gr-y*5HdWciE}ikFn<HDi#iYu2Nb((*}3jYEoavHuzTAIoIOC^u7ONd^>Y3 zEl7(MRNQ93?rUcFY4&YgKdBwuA0=~=)?7Ma8TcVQM%{*%wx}Cg7676#P$|&9Qb898ozH5;whokCPsX_vDAsiiL7A!}%$2ah3vS zE^i>;{6UoXu@Y8|J`Oc$|L9a!I2P4UhBKl(O6ufLrJIo~pZk$=ath++2e`(16sk!_ zz*3JJP&%&&eN|8_x;30M-kd_0Ug3DyIY(;J={IU`2k@-+6R7_F1)b@ffjDvJ2mO9| z0>fShVbVD*HZlk#-&r=Wpl@#~ks-Kg&c^K?4UqTfr06y<2^upOHb-ixu~nG`cx1$FUm)y#4tW1PKW%TKQ4`vA0gehqJT#1JcNFR!Q}OFA9ggFbE?g7 znT5tqbgA&=2fY>fWatGxaK;DkWbTH3e~KZrWikifzXul%kHtGX!i7U;$8xu-5cG6C 
zLQZ4H^TP2;=&7mCS4Y;v&2x)O)}x+`fv&=rl}%@iYo zQ^9z`FY&TrG5Y@bBIy@%l?+elUb4FToU9wt*j&#JesxJDoiab%TO{JKoNf@-B?@Zg ztI&3SJs27tTUp>~yVB zaA(9bFc^4&tTZhl`fD%Q__HxoHThj9SKEWvCe)EhehMzB-9?dWF2SIP^}_i=1#%g1 z2s#dg;M)zGL4R=zyeU6JcYm$JuUAf!#aB)2fd9GTdu`xk>UswzWbPkB)-yZ?FAXV7&x zpoy@1=1(EFu?N2ool8$2yNV&tp9yvw6zRb;51P?)sxYKSPi#12%u|MBvc-&exT^92 z8cWnb(?P(H8(H*bNCC~Q`YY~<+)dZ+^yctyu51r^Kp)5tW1>J>$ zzF#@mWf|7Wm5FV&6~c(!*;S-<0;`I0UX7EKCo75!Q&)NjL zQp(`${Z+zr;RGb!Y?7YHXclz*b-2?-QOH^)j*HcxE2k3Zz4v}_e%%*mxt}2FZh-qf z`BVS8%i`tDHlXvQCmz519*j2Jr`V~6nBZb0Rd>7r-rCnwy%s9WHqn~p zPpOaTUSaRQzd~tzSZAgmi@UEJS>Byac}+|*Cm1FnLt6GoHOvoQ8+n1O?L zJm-Pf*WpjzRSu}Ah8eSZ2rXq6VA#4<@&J=Lq}2|atq0?=%qlFel5y~#u@rSASG4?< zAvye?lnV3~pz-P{&^uIt(RP~X_+X0U^Y>?BeMZovyzSyYon4*KyY24j< znxE<<(#r)TH@_UYwm;s|ap6fiW{AZXU{UQO8f#_A&i`(M`udU3HQP=|m{vutfz}*T zdkk+MNyk8+I(qAy$fir~k@BzJl=Uo`Q%r{lsVi*BK z?yVr<$u*k#CJ(xY4&$he#bD6*7V3I#;*9Huv3%gKLFi_Dr4&BeCqjZFr;)?QR<-tGFWp^FiXBgHyhGOqkk3sa#6uOUt%yV z*8~T|D@f1#ezEtrT*G&j_kiw_F5GKhgIMOS1-le>3-2|@LRL;N$j{sh9*=uUPIgzs z_yB9Jl<%y0-J^KYq%)#*QZ&<>I2!u1Sy1V8w>wp*;zIlfi?S85r$rcN&Ce5}A1Gtk zDRr(7yGY*J36No{*U4)q!gM1Ej=p{Z9&0M`)vYS{p+lehJXzDPTb zD#a5;6?|%0Zl^y=MzggMsJ-M7lr%YtWsjw>uOJ=oJ=w)-%S!NA_pN-;yccddUW>k` zO&s%3@_IeM_)gX_ zY#*Hvlz6xSa6jjZyrRCF2!;zGjkMY1tv4?*JuMzfX{NHzaa{E`q?6Nh!AI^zpm_Wk z6~ZXjpg9?lBhIbzu?4+t04YV^@We+mvzYNx3` z6=CnzGo0>}PZuZE@iJvC9&QuGL(+RYl&VDW(SYufsVDbC^5`<2p-={XzQ>@>D@N+H zrY|~9O~<}@%cZ~Gj&@L9IM89uj1=5)&>dxYnzHDJC!pBjwHWqy7XCQZ7bg46V{+ZW zli!aL|C#9F9_2V*UZ=tD^u5r2ycO$gyv_#IoxQn<4FBsB$``aXWtMSka9>CSA2eD+ z(UMP)Kjm&`FMNS-g-=ASfhK4%H3LsxK16b#eX#a<3?5pymj^7A(n0}`Oy!n2A!lX<}DQ8>lB@QX2X|~260u$Pcj((7fde?#9IF*n!R)m1<(EsTlaTk z|A;c-L(4uoS@0Nwi>#VfV6; zY@xQFTdu{^r{R4Ci+43t_Hq=fkJH7lLznn@T^b}?WU-_5Elz3fDLdV+0>c7wd3@i= z7}2yHL)8CKWDK$UaswV|a2Z1jqC}k$M?iRTKxmowOKk6T2+Cb%(zqo7c<2r|8j z;cXX1ZLJC#GJ;`Fx--g0oUk8g)C6ZcebwO&U1(Z$9^uS(DZXw2g_DP+UK^*wic0}x zyS_Jt@BR#pe_x2Na(nWVzZ3YDZVJWQZNqI{x}dQ89`(C>N8-QKn{MsyMW+Xv;apz@ zVZgOW@_G<1o-cXQ5 
zx$x~nPh4c}Eg78k8s^H!>IkY4Lvt5osy6Hi9ZgdC}&o7`J6Tg8pIYP*3 z4}oQtJwElN z>=txcP{N8k!};mnmrNPs(c?ll%FDb3IVTe#wbP5mZJWbtMXNw-TO&-%7y`5QO~dRj zujuE!Lo{TPwS8nh7 zr^ah1-=>@WRmFQc$A!7W2$$#_q{Lofd|Jm6Z*-Y~Eo8a@h;ojMN_-s`UwF|6J@0&My=XVpmcP|lB8hcP> z;Wn`PtWRq|1BVSg0yAA!;+ODH?Atkm(saw1gTH+zm4Q<+C}GU%bOVe1!R$mv6H z?urLWTyFTzn$FS35{qYpbd8z&T;X zLJeMFp-$UfbJ=5dJ`|720H=!X^yQigKV5o&`aiD}O82!(8_hq#xI5u|(qDyJigobY zwsf{rPN0n&Ou4bm1JAo0fV9D_u=D9f$~lrE+FSJ|r;IFG=Jy5qtnJl(tTmN z=6P0A@B;a)%jn|6bZL(*K6yQntDD`yr^kEvdSHj(EF^H|BRTH)v==+vTts1<4SsUG zC_4B5$&9#show00PLbrmtQb)CnvZqccf@jq*I za6anHi$`SB(w|Hu8fvd9F-RlX22_xtgn>{OV2V;*9ZA)g))f`%Cv&?$EW z{`i-{c?*;sXlOmY`Vh+v=e_Xa!Ve(mJ{5!&O%#;1iIzLCs68y7X5ZJK>RJ26B`z;% z?-G4}6BRCal^sR5!906whuIY1&s8o^W;~DGpUd&RC9b zm!_dCX@G-v+findu`=fkcUbp{Jzl?~=8%=rO&Z{Jh8JHuh(|6@qK>CO$aqvPmG3N+ zY}acSct#TUyt5CFb#a3%^B`83J%VRHS1EaKeoM9+IN!aOhu8 zuGLdU6}9WUp<^+0OFAs{>b*x8cPx{uD>mY;jZ#$HpUByVw_w+&1-#|&UhblmfaRez zxckvcR_m-^&ZjLpcP>2Xs>yg-s5zXgyEvn6lOtM=sOt1^Yf!ixML)Hq^ul&6J-MaA z76p&!=*W4z`R`}?UNnjy7N%p!n~${7%}*%UlPr#&_K61Q6bgGLKZb2CJssS1QpiO1m}d0G zwO^8?CWjoIATPPvY0`}lG$xGqJM`f!c~_j6v6s?E zMGJ|!`GS9OqBQJ|DrN^i6uSnhaLSu>zU3bPs$-0B_P$&?sy+ke49>>HNE!U}i)M@8 z!MxAaeO27@N>PUo>~F((=J_dZGo4E9rmpJJKbr2%;GbOd{}&ZD1+ zL2!5XRq8uwE6mq0;%&}D!Tj7Fd{@<1h&`!^{+E7JMPiL~R!%DA&G`bMXGXAUz7cJo z?Iawo|0cEu1&~dCIlKRpqJF#=d5uh@9zJhj`p4(|*?FsAYq*1sq#r{M=YFu`K|i>& zca?P2AWwKO>k*ZAD`Sr`d-NMB<)3D;bn}-Xt?k_JiVN6CDpU4?zi~JGcA^V9mn;)B z#?7QDLbx#Mpdl29cI4vqh5~MWppl+-P%z3~Xs&L-~Gc*feNkTOo-CZkv5o>_s z_D!Kh6CBY~)MtOUF0ko@E1B8#BgY{NC`3AltuJ2yRF}iWscrT<<6a3SbKlUhW7DMu zn`c6fQUaMuvvFnYbV^qU!H%9=xDPaQjrjqv2+pRGhn;()?a!s(SKIJ!{l|E?&R2+< z-w6IEOZnWWeweO*kJm>4=FiDy`6rtE=J9n_S?~p3L>;6hk1GY;J)N_|cXLGz%TcsA z{*|y_B^@jq>Y$)|5EQ?0q`x}>gY0xrwMx^$w}+Pa$9N%aSY^c?%i?hSs}iAQ`5zja zd_X+>y+!DEV+ol2h@!_M&G`2HDtfiyGAShM(Ds|RVZ5diPTf3$CR}x+hN_qJWBx$F zV1Yhv7~7luF4har%}+vIekr+rAJ2(5neOWL1e>E^8aHC|9<)iLn70_G0`QBg3f8{}zmlnnG<`)oJ|F3)t!uDBa-*49r0k%Pk&8QQJ9l&qZ}6?Q 
z8JH>A3j-4W5cgh*ZQ6tB-b|Cuo-Iz070wqA1TPh~)x8rA*Cc`YjIQ|TK|c)D*2YK2 z=Lnf=qiN6T6~da=r{UAVAvh?qoi8db$BvD|VQi8XPmDN4sqZI~*+eaVYO{`C$hvdD zU3JXTn}T6(E1@!BBmWp(44Z~@vX#SD@MLi@^dE9WFrS<*(SDo*nU#&A=i?=)Xy2D3 z8$SuIg;ycO<_Y{Wy^oqarXkld|<@P&{hJ8EF zl55jBJ*X$@-7TemXWd}&_MM{i%^djC_!#t@oa=8-i)Wv%_yDD|NbFf*xFqje0+E3 zqR@5Gyh|;(-J69gGNf)+J8f251y#X)|gue z+iQQrwb|qNK_6pS{jev0?yks%30H) z4m?ClZUlr0~u)S>%7BD^Kh6 zQEOWCc*nwK)O@bQd9;Yl-uJ+!Wg6m$PZs>%;5Y}RpX9gF2Q)7#2t#*ENO+eGRxxhm zm~;cSs1B1&2@I!un;co;)=0eJC*cLNHsh-5aX54FQFOKV$eWwGqTNmpthskr;!}7S z?-cCg$E|^IO1&K~tQ#PkrymFVmrN2r^9I9egdcYVBZH~x} z;1Cs84lrH8B`)n{o1*{0!k+dx=-egoN|T>lx&8!nI`4>y_)$Y6lqE&NmAW@b&6h>*>!&!&E0{00uf>E@0dOymkb8Z6sy7W-tc&?D*Cl^rfOZr03`g>41A(Z5@=YnsD6~FiIfft|Gpi#Fq zyysMVjM>ciK>Z2ayVQ>dPaefVC)RM8%0S*?Yo{=7J5n5fEsKKRsiQ-OD$i&b1!8c)+_4U+NQu zAKGg`&g=>?q3=Dx<&2|fslOAPtZ&MWwZBX=b$hYr`&T6QF9*9>o%wpBt1>HSqabL$ zCWox8IC^RUI=9)&qlWfFn^os9;%;ZzGd#s-l>2b)=RFW6GsC;`a_aL#mAegd#m|9i zkeP8y>Xv>M{;pg^d!_DzkwJu@d$@s)M+wqwGMYA_15P;~jF(C(u;Lw+?fPCvX&DID z9o4v`K7Z_n^lll-OM|B1y-rs}7fU5w z(2;o8j2~qc(|Tad@JFCkp(muLSkd|udugk3gSat%0N8pL(TY{?$f~$byzr?TmOUKL zt(|^?cU?XFXblsZ-fHmKJ^^A{;4k{JqJ&;XsbisU2z0j7<~dn|u*abJpdUD#JB|8| zYtpBJ$($49cheO9YpY7#=I06T|J4e|C+CXed%HlQ<_{Qkv@_o=3xWH)NB=(y#!a5g zBU6rWUV0$C8ez)=I{3k^-0t|bZ7f_ZG#9edK-O)~Z?MP+MkBZZ`f1xCQ_YOlg$Cf? 
z89Uf=UJtT4qK^;U2I6WBJ$_QA&wF1-lG)ZA&{^K3NP9M)f(A9i*Xq}_$tMjD4APUF z0I$TmQP24L$`iC|{B)V_;K|sLv#8seJCJ9VC$V;=xH`=Z`o20TCJ*f|CghXHRv)bsvVHYl5Nt?7t*_R;~vzFK^MrGBu@CA?l5jRPasa+mQH!Z@dnJZ-TqetMrtOPAz=dsdb>)G$<>P;ke&u_%M?c#RX& zPDe=m*gPK8{e;lHc`K>ipGx0wAdQ?JNk)2YxK}_79(@0U7AuCaN8KkH=XHo_^l&eEU7x{>PLE_i-b4wbPpm@6j921AFB4_2?{TPMT#vrB2cg_y9*1@?f&DSPXm7Ma z==SVAe7Gb%&n0Gp@JS6!&hMoTu}g&U8i{P$5DDMshQitz^Z5P8WZs~9R$N%$6+KR9C;JPkGfTvSdF@d1>sPRCP7@8Zv-$82Ybe#6hSPfm zE6b~WI7YRAt7TodY*-0&xM9cv1B%4o=gvd$+!TfJGZ7+Cht9gjieBd*)1)1XAlN?# zK3oY$hiSQ_*Y*N;-fh5L+S|e8<0H}S(JXYm6U=Rf8)42LC!VU+7gH}xhBS>?G)nt5 zEQ{EUlUC}pwUfvd8PeQr(lYM2=OO*pcIO?DKS1+jGTaL2DD;mxM2kB9Q=A`aBfKoy z47=S7c>Uzd;65Xk+RkvmV&xpc$GU{ACaUAWaT%n(aUFk(-9Xklm*K@3YYeJ&#D95- z5bkb|)f%1UlOsnc&*q;b?b~gzv->l!T2xDPe>*i!Y>+*kcL~+5KcJGul|1-SHy(Gp zA8$H0Q<^mfa&Fr+UcNAe9(TSj*y3ksx8*N(n4ZjvcDIF_>-%!P_6G6Ko`2-5qlJ$T z{D;Z2*0V<0LCF99g?7$-qFCiPmfCDe#J0Wd0IgN!j}~cRC*QqXy5t4ihz^59cN_V$ zw*%m^sj58v(?RxJS4rh}X7cY_KgD@<*Upbh?JSbE6&~n+h;@Ij0G+#uTxI$UhNjdC zp>`pxyz0i$+Y`BB!8>8OdRyVu#T-7;W)^SB9SBo-E&P|10p5x+tPHt}IZ4RdX4S*W zZ56oUDe=bf4lr%v9CXYai2Zde@X3I9N(&r{A3h$&hOLq@S(nLi{g%!5hJ zJ>?HRe+I|aqm-^YP^_B&nBLcF<=N$U_|{jx`CN~ckV@m^!^Eaf zt!4AAiMtUWQ3lc{wRovQOr&*x*WjTQN z3gr~uV;;{d?kPmhY>%#!+~Ml`#}vEtBs3>$^S&Zoey~3euZ2Ap-%bjGT~%IOxuz=y zPHWG(H8$u{ahI!e$MMnI$yjTdMmwA4k^bJz!T?LAvYJ)!r9(U)w#ner<9)HG({X-W z|BXM`rtzI{FHQ>6!VAxDQkH*L_z@e%M)i4u|Mz4JzY+&U`Rd9=lm0=Ep(D}UMO#cx z@x|ZYg2>$cI|TWx6fXrlXKkNExV`ZhR!RMfYXzqv@KB^e$SMN4jtwa4M>x07U{%hgxO`xn-QGEAGIpw-Xf&CFR+Uy-KmdZ9mhhg7oS8=QG zoJ&Y`#U+_{)KzlUFyvu-9B{$Xw_;V&Td*!DqWr*tkT$X}>ol9=r#)RTENnL2S#dyg z+p`KT-K!TqO&h~eUB=;w@O>;_szLYWmBEc~6Ukt6lymfql0m<2k|MxCz2sWm_ zo@YeEPcD4^^fAy18%6t~&(X0R+p$zhA8@;@=VxrYmK->-wL)iZNt$iJ~D-W zE&Vk0q33!#VBLUMAXB+Q<=ehddG-{l`<4&IzFpW=-vQH)wTNZA@58y-Q^f52Mfm8l z1}$qB&F0VSptoiu^k|BPId2G-wT=<8KC0r;mZO4wn7Vj&K?v4AalxOOeNh7GF{i^I z_&0P2q{b|h<^j)ux2ux;^E@j3+=nN%bP}r1YE!7Q9_@$^rmv5j*ruJ1@Ub?VHqK8G zhQ*GhiZNEyYvvDO|2H!{I9LTX*A!As>`5q!UMoIK){@sH7vlKx>GIjuF)(MqWDZ+0 
zn%yrRr(Nf|;iyZF_#$XDPPqiEu`mK2%xwj6dL(+;ZR3TTooP~!HR7I^O>}#YGw#s$ z=i~WTaQrZTvg;hq$2r?C(?2x0eI-&yD6OYaOKjOAXYw#)82!8_)@=l=V)O zL%ol&;2U-TDgu%eu9APN-=F#TbCAF6oy9v?_bv~rKA*#JBW7TARx!LdGLGz8d(f;4 znH*pC5)SQ}NN~O{B!)`Q=iPSjR=chEaz$IEeVcmF&CZd*b#pvd5kfC5o5c^|jx<7T zBm0Jj^Q%+peA09cdUiJErgzWjTuT`T{3{VUv~dLS(m1RRK7yW&J+W--ZAf;Af+H<= z#JoER6n8vDIuBa%>cC+*WwaT`zj!XDe(r}$%a+0D8xdI6VIJpuogv?chVp1F7aY6h z5bWxu!8`3L#paw+2>W`4^R~Z*AALn}jzu0lt9}7(JK55S*E(!wG?<>X9u%gYE2qNx zpLEZ0m*_NU9%T+G6vudY;IB8$G^?!)egC~@-_I&EZhj{kU6n5wN5qI)vy6Fugbf*J ztMdk(1<=_vmK!=1qIDj!(}W~(;jZE|oQFs*%r2=eY$pAgF4# zY|@txlrrw6FlvsGd}HK3antNQWUA|hx*_SZFOH6+sI#C3qw(bDHIuR$mcm7yr&Mt@ z9DnM~!EDdiRMx<_@^%e^=1W4 zUX}oRCYWMOgz7`FRn6Q3M(4Bu=+PgVMARTj7@jIU%#7# zftpLXq1g<>I_Bf9ZtR2Him$Js@IUDFnt_kM77|GlEnWNHf2&O*ii@~Q)I>)EF3Hx;-$@@QVtQZ-@K22xf z*7^uYAMk}zr)SXYGX@;JR)yxhsiy?TGnjaR#h?FtxQCw>ue0e1Ulx6&_~UiZHghiQ zx)n`3nmQ_f&)7*emb!FO9xr@sDiJlvj`dR_aK$fs%yLY`EY)7@ykF`+$3LQ>k&mfX zFO$5UnqYzMdU(9W1Jaahxx&o^OSKc>{Z4f`P0Qe4V^issqcNNC6z==Y2fgAii3SY| z(Xs3UkG72$S8fjDOGh(=-piX{;DrOFR@1gqtFtyIM5qWol`&$bVh~Teti(>`XR)ex z3QZ5ZOm}AexW(i_6>!p=9Yz(D96-{CVnf)B9iGS49f@o9*JnPY+#m{A4&(-<3j^ zbmWh{+eq2v3i{&g&Ot3c@Z|F-oG@t;_v#x;>4AHA_lHBU^5H#rRec_8=Xv6T+qanB z_T$#U7ieF{TlDdn0v}!9j{!%|@n)QYQ={#8wW_*u;re4>JmdzdoBWW~^*PBWOcU7Z zkCb_*NSwXq5LDYFIR^|jv(r3%`S@+Q;{LmS;tll!7%*) z9Ww!cx$bALyN7tq)-*VH=LnAc?#Lc%A}H$cIO?LB%;MSK(Dkt|WQdY)^< z%2X{J_osr&mkHEIJ)Bid0=PwHN0xpM6x$!!(2DE5sYB6Xu=|@sPCvimuFLA!BQy+` zpG_lgm2q_L^IZBIn+4mv)!}o;J@6&Pk!J05poi0GAAAj6ptUy$#m)55ucCfPoHBYyq*7y0H*#Nmra@;#@I;F`Rby8p80 z@O{B(R@0eBR|bg3yS5P~gzbi?!)=W*6vWBQ_$DqZy5Z3jtryc>> zbg)wreGk&cEv6l5;f*JX8y-<`ct8;h;NcwR)hzD6{!-j<$xrIGNnMM)dFb$?0>-Yq z$0K_AQqL)u=wtsov~qV8d9_!?7dMB)v@ky*$wY}2(@x43JJK(^`*V+zz2z5j8rUo36QoyEkoMf7dn}~>7eendR?zS0Bm6QcokZCg*b*{KNcXd4`B)2{c6uaOFOJ1gF7L$Ec1XUR z`k}8zXX$h)|=xotWc&$;0oi_Bs0d;ZE(BBqs zIS=Hl_Iv2?yj$e>s8kU)unkPqO2C}+j+Fgk3>!Qtq2kpMu+6tt+ThbsS(HeG+@ImFq-ooGUR`jE&9DerL zkFx7g;-l%N+&QE_1k`Ket~y&-GBp;8KWdU;&#^GTxRO#PPlvp>KOytr7;?+~r?|9M zVz?Ur(7w9T8p64IE^l( 
zs?$l^WL`9Su>5jOe`v{^0V!mHaovJ>oB9~B<;-82tg%?^tumQ3pPKLkc`ay^%%GI` zyRw&;-GpJvt#G7fKbdZo5Lu(OXut0`yWr9T&OL(U;FXW}dP`|NA! z$#fvy_#8scs$IF|Z7SGzRKo-9Qh16(3(dS@1^ed&!3J43modkuV1sa#4jP$ba^zk( zay1ovd_o{FAc3ad?S?&$#dF5Wd*Yq6kHTlSIizXTiCmLZgq@$yiMwCkq(wbk*6j`k-GX>W`H5^HCcaoG80!g+f1C5cJ{ zU%Gfoow=ECi*}q37Vk7#;Paa^u|`|+QT8fUIE=S~o~kAsB|9b_9h(NPHU^3drhXDn zD7FYyF)Q%XwDa)W;wDT;4TISeC$eA6E4rIGlHEQQfyP09A!kXfOs8@Qm0RlZ`fll5 zA-Qv&=~}RdrUkCgYXHlrtwL|pZM?u#6>FFJV!Oq8sP0-Od5Nv*q5debZLa{>Y~6(l z9XIppd!g_UL>TR1peP$v3JrI{F@Jf0_-E@Pa1FHLwg3Lm{Lb#!;u-*I{Z_*1j^=RD zO7dkLm)Hdzb<#9ifu7MJ@b;i4-#Pph3MFp#$lY(W)qEb`_nd&~FAdpt+gNe;st2%k zWKS>|AW-2lJ-8A(0k6$WB#qZT>{;_1^6z9RPU`sJ0h9Io>9PY~s(yrKi`6hwr-&CF zeM!$JjK>oTowy{effxPD=Y#r*s1kpj793ZVXHMTsZ?7oCrK9$NN7^dc?&%LXs{8@3 zoBe~6H-)j?@=4rb^=mxXq|0`C2ibRM51!xY5Er*MCW(;{%%_~7l6QcvdAVe<@;$ZB zd?8*waFM!JBvIa;8JM>r2M^gtil5b_d&rQ_mAiQn#j*LHF`PZ!Ul#HD zlz74WC^{WVi^y*~4v`|Xux^9Pbw{cYUg zvl_iS-l4?~UxfN&XRxQYp8S&8G1-e#g}id@UhHY~J{Vr%s(P#Os1enp$l08-i!J+r&m{b$THWl$4`O!f6t8+hWZ0e4;5_Q1BGlkt3 zpU0dh?eVzNFe>^U%O3*|W475*N?13ZNA%F1Xr7lz~Vb!W(8{V~4krGbU%-)Z>DMDkfSorb^ei5c1{q~BVP`?Ab= z=X^Kei;5-WHI-shskJ=hGKq6ad(#rvO&mWp7e40fz?8qztY>#y;l|01Jm8%=yve?Z zwsz`x_2wIPnAsI=lg#9+94mSKImt8WJ_r4W_rd7MJTdds0eI-D!`7d|rSG`0JTB-A z4(alZoKhRPr&%a}PwR$nIHW^lj_eF8c2Y%&O1?zTd;OiHrRC+dt#Vmg?xTwH+XVanG z+9`CyYB9LFrO*b8C$!mV9&PqX;@>86^dFXq1|Rit(jPNSKBOqMyW$6PcmTidypLtO zI&kMvSIK?!3_PS&3R`57Q?cqf?0NK=hHq#pJMzf`WS75F$C7B8n~+COYi6*2+Dg9p z-&93%o31$jbDeneb_dJ|N~4L^$?Q9@f>j262djQw&^1|L-`n5Fe{3$-s-?gvsNh~L z;k@^05UHtl@TMH)i6kr>UHLVIHpOug*8u?icHq%XwIBH{Lh*00?QG5LpXgW`09- zG!CGXo%DFa!&fp}_jt$%cqicazSQ+@7tnFjR6LMLJ>V+=5D=(=^f0f~py4_FrnB@T zW(;Zsm_g5#(|FzHIlRRG5(Kd+=lA~rf9AHur>&R4VBvDIZ#)1KKQH6%#yP}?i(wFr z6#QIUK~rT0r8W&vy{&$U_^C(4B|Jh7$wmD(P%v51T%R~tI@trR}QsN+= za!SmRJU81t*ui`jTPA)IF1=2I#3h3;-X@=qZATtrX^6hxQt;WENbsm|p|#(G>B!|4 z7x9W4PJ5v$c2;kohbIDsOS%?I_rSDQ z#^T$Nxr&n2(s$tNI-E#u*gWYP)V3dkE;IK~!O?{n77)yFNuA2tJ6>iLqh7%Cqo^g( 
z4_hT}B&&TiEI)P}(3u452%l6r?jI<3HdW3PyNv<5?^$xABBmOQ8-{&=%P4w)I`NZzZ9I7X4m zFLk^qM>~^-g{{Oj{mVO6k)6nX0u}JF+P2OmMdyN-j zcAr%A*=`Dn1NG^5 zgUp}KrZp4nHewk1$OT`|n4k*AuvIF#F z`6o&@{3^a$cZt1^9R=IH$6-^)4qUi!k@)nYwsOs&TeR?08`4z$E=-KJ;1a`FzVdkj z*@`}*{<2z#ESvxyen;7N%rrikScvOp?Bx03$7tTe*D_o6{%~x8gRESBT$%;RajeZ) zA?LNTF!W+I?7i>GzVSNz!gm8%R?Y`yM}73jUoQThc0t&*27K2bB6+^C{d?XoXKdj)w*RLP?gppRF(4#1XN@xZ1#j4~?_qM_MEK&^0q< zuVfqCq&EO=YxuCYhk;zXIu0uv=iv3Kb6jU3%~KET11Vsj+?5@0esUr2)49pEr=3vi zSq;xo)s{;$2e>_;Cpvz7DmyhOk$RtZW2+zWbpC!ad@NmvlkH0&B6v8vR6K{8o%8Y7 z?bTvfx{A{L{0!E;)&~1*oDC6`7s+=xC_WLW}cx=GNq5fPX6DdElNo+e;mpdmlP=2B#gbbd<6_>w5Xo5d} z-KegdaJHXvn(Y!EskW6ZbdN%p4GnPYjG?@^AQx{=OoqR*42g9^m$a}{*|f(EG|MOf zeRrOOYx@0!_K&pXST~<9{kFmBl^y8+?pM#%`JCzb8#eU13*$ZXB=j$zS%CKIrQO}e5 zw%w26e>C~w;so%Y5hdF<;FGK>{3%rP@OUq-v+FrSw8de8H7^j!cd&9FFBH5H_)vaCwaNSVfeAQ zP@0b}=WShdm2FP;U@u`jRwVVp<2Sq0qaXgz<=SaQ-el?hrx6X~ZiLgtH&)a#DnR(& z6h`IIAwuoq3pi|rioCXXwo)(FfVT`^#{ITjB?wqihA-NYb@);GFKG+Sxt58aUM`Tj zS?1VJniZyg3xfBb*5a`z?_mG&wfwT?1OJ}97|&en2X}2JV|{oMo_&6cKZOov8$B=7 zS)7<=k>!`=1Og6vHXsk5uf!DI7j#OQP4A`=hJOO$eoCCB-2 zKrIFSog$xqZLFwe_E`9Mu?${rw1TsSm%%Wr9KQHBh$A<7iqg&TR3IhTY!Hk4@&W$^=_7Ijn^`J*+u@UR&7W=}v(c z?PXgonc$k$rL?_SmGcf?k?~Cfd}`8$dM@jaiT_eKOvr~5>LYmV{IjA%TQ%G(aTRap z#iHFJJ+XPuGPaq0MRe-Wmbb*(3T|7H;M-QOQlIgEVE3p*4m>%4eg4Rx=igBFJf(rZ zADj}m$7c%))xB`xN``F6ekUm0^qZ~+NuFw}JMhXmm}#Id_j@%H53bFZ{dFl7VqR{* z&-c^V&nQc{Uv`{rHW|Yr1Er!)dX|fYP`X`wS{!=Hh4W2AP-pTq@omrJbS`KV{dl_$ zGk#mc6z`*yQ5S}z+?=o_c^A)HIuhe9S#!y5KlbvNP8Zi5mG(zOV6kN|6uwfSI`fY- zt=}_P+s{xIec~1UnQ04}BM#8qz@O0PdIop-93tw^%D`tm(n0tBL5lZv#;T5sV0`#~ zMW4uzwA{&yYtF6Uti3TL-ym%XSYJYUxE40;PM}Q{OIfR{n*5yWdDcqbPd&@K2&%tE zWAE9gu}3>+w7t0rpST@>lXbdq@|ZUEkh_xqmhSxeKo|B~A4aL^T3C?Q2}eINvre9%hdN15j^0v$}m&6ObiodzA=NeDV-$q<}jMCwE<qV=(pWstYE?Cy9h|!uqsp^LnyUi^Ft=Fx5>Y|kA8(ycahS{Jx z=N5RW^oDi~di35claJjh=GY*S-gF))Gk5Qg557v=m(xz}q3=jx!L$xeM3Npr;i9m>qHf{JdW(jJtP&(F^sdOw~es(MOB6|0svE6RH{C zuM+&G7~!i&J;|WpweYU?9|aGYiGBNigVg@^tm1N8oFVP(&{-DGOKl@L 
z8lS_Q>TdEG-Fk@evR`oIT`HAd@4znV1uo{BhtR(JaZqq!C2Vo+NIkbSkgdgY8tFD! zu()Tz1D!6&a;p!ERvpJu_{U&A{b>eHzUe1y$WDOOrgL!f5Ci$_v_u#Yz8!tO94EEn zbyV}O5(85L<*h3Js7b?*+Fwmj+*v0%@~;--=tYk)SoJ(l)7gM$4LWmjuQ+OxQb4ct zM_^aKY)V#J%mmd z7V?q#rm)^h6(iM(zy{xv(UDx2iF4nA;|_&HT8c~7i3uPelks^;mv`^ra1?ATAqwJQ#v40dhv_@>il?qpw#Z%>)Z{T3zR1n)Pb zIZ^`~OH`E4s`rRm=Ur)C>ty^}&OGw(dHDDzgT|~+kXbD% z1J#GugyY>0qT%;s_D=2w6(?HhL>q_vsJt zvbQK6PM5KTcP7T1>A}VCQiR@Px>7++7{A$dg8#HBC-?no$`%?V?W~xJ4;p%kk2ZGY z?sK-_ZcR;j)B(mSM^ijCTZR25GnTA9Bz`%t0y|8U@sw&6rAPbYtomU;R@xXTjj|2d zzv3$Fzt@u#gU?cWkq!Mx9tGc-m`H1iH@$MR@c0+dlRf=X_V>uz<7=7-1 zoy+@J!n5moVpiG`7}52obRLS~ZGYo%;nlIYdDbBC9g;&$d!U?q2W=H%pQmNSb zZII~dd_dIJNQdliDUfeG9;|b_h!dV9lIf;3@aJtJwzN;B9@cAG4JBTli91*;99C`3(ZEi^9TsSQR~Qtmz$<+|idSuW;yatG zG|t8!3tnkq=AFyzIX@IrBQ4PJ)Y~ivv67cBO`_KPQLhRNC@;Twq)03dYJRMSh!oB0zHJyn1UyCe`HFW97BkDD;-oSMdON8)w7u!bD+|lqSFW6b8|n zD!f2hL;Y3+Lid$>P?0(hT!xtO=7~CNmu1RDO`jBxJKqqD`_7|Dhpb`hrIAptu0kFB z&coV|H^q578p!iqBd+Bi@OJHI2u;zW&Y?eqjr%>xwJZjF=4>4^7^ zzmdgO+2Mh{5+9MhS*)>agSGE}fp?U3*($U1g5~#cX!#+B^}3S(Wtzkv1b3GO4$pxf zjTPd>ldWLs`j_gf4nX0lhl;xMN*d}HPm2pWOMTUjoU_fJww!LmC;yuXvajU=A2Gv4 z+5=hccZ^PJN{*usE~4SKwqkYTey)`Lr1Scf=y*iB`_I~nSKma-S|4k3AnGb_E==WD zUd@;jXH4d;4*1NcJwLguhS~|Qp++3T-TZPVBX@2|+=%N}Pvn_XH(@(^ z&#Sw*@_>NVIA7W^K~6qm&y|5}@jR85HpI~I|F$qp%7mlFXFvhlVf#XFI@|0dG`w1; zFxQBYdT}%G$=F!o*y+&;UOF@! zxA&?Bmn||fJRZjj=KrF$HV4tBw>`%-<-=J6M`+*ClB`v`vp&p$3a=(%*MED(O80N{ z_e&U_xR(w`_G?g(bucVUzf9wuF4Lj@%czUH0ok_Aq3e6h_|?N{@M>g>VADDkPi)(V zv-5n#oy||M@5LSBpWFWUVrK<;B~`(RsFN%=KFH5C57O4H74Y=L5}4)sl5W0n$O=K7(SvA`}~HqD$|0!j`|4WRTFA6JG5AKkXkN z^EVXg8mswtv=?X1l%$EN;9C-wse#YQ93quYR6DiE56Io2l!d=?N^!TVQel9QPoy(rX zz8GsL3-XtGGzHLdYXUyloI`fM$6{U88+f5tftNL|(5hXJv2x&AMe&vZ`RnaQ*kgs{ zaq+pr_ha+0^IKIueC58lZ(L{GbRZe0<;_HBz6Jm4<9U6^Np`Zek^3Gsz-NjjK$RlT zY085=`Vo93b`cL6(oH_8%%A_Z%aTB8M?N;ulCmP-z|ZxP%Rw}zGrm9Y#Gmi9H8va? 
z-`?a&Vg(Ge+X;SBW_|acEiBH?!`|z1gvBF=Q`N{yh+ik|OVzzk8-_K|&=IpZW~DpW zAC82X(@ca*hfm=NiHTcob{mIY2xNQv{_@mkT5?OR4$98YBlu$37|^fZ2acB)E6C#V~6L^aAB>`YIzt+mv_PNuPfQ-%1rUg$3rw_;4NNS zszp!s#tZIKe$mOXci=er)4XB(f-aa^uC!a`z)nBx*!#`uZek%fg?{bE& z6Ai`1H3#8(fd+ovX2!!M#@G7$Cug;n0_9FNXPv7t;-8b(6)hW!L=(x&f79YUmmQzL zqf70TyGQ&&J7op^y}3=P*3CrfPW7dFiy)|Ts~48txGKi3lDv->GdOzZF!}83DR{9g zT|B6&id7j)K_|wG@2SOzyCfFsMpXgdeo;gZ26ZHvhaOLpGWLx5cgbYx9jcQj2opCS zQV3>qp_BJPr^MuF1+|-j4r^Y4>Y?S})~!Gm{!QAww^4^B6*@egwwCYBbf!5!RAqXK zPcr|e&*E_92v~0FOqg+8vI*wHVB$*2!J_q+nnMPm$D2VM@Z*K#I%^ds zPUu8`CQo6LZ4cnd>%*k?_9{QzqNcPd*O7abtAhX7j(lw{aKwr6oYzrFH?Q@kZax}l z)izW-d#+sQl64YB1oRV{HQm^HYz#eiJ|g84%TZ=^gy46+kTA9p^lDSE_IHYu9Swxd zZElJCqODoBDV$6?b`XZlHkW0T4MlC`Xe5bIseN%9E-$K+O@3nmZogi`k)l2H^l$)t z-8zS}jF!^d9^Q21UMWnQ(E^V0Dtdc8h(>4kMpZcr#m6>d`(^uaZ@3;h#Z6X3d0Z@8 zBb~R_JsBzO&q;>1j|};qK`Ea-e-GZSzQP4_S7PhcQ+&M!!0%iYf7(v)%tc~G7QBU2 zqeS7Dhb4~m(PHgu9YM6}gM-dAko&p|(4}Cq!Z3S+xO!H!P#rrNY7%3?b!jLK81S4f zubv8x7gYq!dl~TA&j>grT=;pwR&07(B4u1=VvO9JV#*dOj+!?Ki#`lOjiYbi<0;8W zmHU8RYdoTtkr(if`gl5%z75TH8BjFjh@by{P&l+s;Ign-SbuH^X3b+-JnWCS{C*`2 z@*E}mxAQK1y_LvO5^tf>bPx<@Jyra7OoL1|+G3Gc2+W-Ejbbxr!n*$&g#1IjX>L|~ z9<==&3Co)mu@ih{-z}%}*-SIgNZl(;$qUD%GErF6>ky4jULx*Kol8Em4s!hR;qYpW z8>c5-7ayi%(j~KhqF&b$7^}5KbY2m}SKrvsfsupx#nB+>o8ZQE)?-2Uhr}-ZJwh82 zud`O30ojCx3L{=Dq7i%3peeZzmE8|T)dkgHWn3V1Xlw${f+Z45Y>JOdtGRD;9{n?q z$J?!yFkLMMGDrNTC2uv+s`U_VNZ!fUhF!${EBe#9pUD_M^CP%RE}o8`PJ+|lOYrfm zj5=OCx4dr+dq>Rc4DXCwzgZ0uR;>*Av~>r*WU-@99}VJ^zkN z#H0D!Sao6;2j8`aqnXvP;qf@q?sFEduRG+T`_7SPnHZp7QH=27?tJ0YyHXtN=*sEl z%Q;NtCtjS=8*|s23pZkh(1%~IU~Wo#G~3xgEqB9&-CNh-$GUx#c>jvjMUJ9un?ty9 zkt$0s4c;Z~H9BS-3uQNrFiJN8`mn^cJZb|2r^#UZ-2=jkm2$o?M2qrPmy2hwYGVGh zXB+rcbXxVDF!1U_nxOlRdiS3K4}Y`>{c;Agr%ETD zz0sXFAD;*|#eKLi$&Qa@G}EnAQ#|W^L~=UR@!;-@czWnEUKw-?BAi;vOrktxulst* z6w%knc#anLt_sHrDaLh>wc*0i-^nt0w@fxNn$G7pDvDReQbzf9sNR<(IsQL_v-Sk0 z&~cD|%8-0oB82D}kZvI3^?hn9y6V3UQ#5TA#>%o14Jaj%h9inwI z_{PRg%Cg-aTs-@Q?9z-VSmgd$h*C)guk;z%UU(p0e48t}-Yo;a%yOE#S7KaO8S+To 
zY`*+(DnD*@;Dy)I;Fhrr7tJW)zNUM)*`W`OceljFor9@r`6OYndq`d# zt>~?K2>jZ=6a(ISfcQE^vD)ha@$h97z1s&Dt{;mvZs#Fw_-(AZ)q~SdO=A0{XSub$ z6vIc9@&EVnW&d}WtA9UG3XGKwAka&)H>d7FFGyqE8?+r|aXHLHE&r!%oUsYNG6p)3IVl3e9@{lOivg z(O>Ug+^4J)@0{g|H#Pp^VSJ6<9I9cl=Na-7S5cCw4e!iyl6XTa$oAd@r+Z62qpUof zb!!*4&-w**A@{i6?LF-bFqZBJ0-DaUkZ;YYgcMB>H$8JkAu|U2wiM9*nFaLW`z%cP zrz`D<*d!`1*Fef=4_;TajSrQm!OF}n+{3^NwOg*yF=r7Dm6*VVvudRI@GW%gI0d%0 z-oppheQACB09KspM=z7+ai=BiST*e+#z}i%wg<>L>qRL}(C@-(7215NWD^G(TEoZc zG1BgVxp-}BU*)xsGx*mvhJK}vRXh^^LIdq$maohG;O#1P6bp180>`(b9z>{oN{I z{I{P%=%W6N#t;{O`9@Z)H{fFW|0p^Shn)T|j%yE< zq#Y$GNy{kIb3f-1-$E2hLM3|_vNBSM7TThXmJlsc-E$sA86`V2l37*=q5SUeU#NSZ zdp_s$e!pHq=Kr_g%i6dGWpuP@&+>W5q)J3N0r#_<9sRv(j0Pq3eA^f^ zyk}Q{lhZHKQ?Gia*|e7};mx3XZ~;a)xzrR2j=3j(p=^<`r?z|0g!amIl;@v}mmjOc zZDF}1-z6c#Q`6z$k484JMF*_<`+%0!dzMrj#*907ynRxN`;Z~PP7nUZ_K%NPVaZPD zv{k3nnISOmfDDKp%1VOnE?^h6-MHkoOuYK#3aKADim%!#U|;-DNW4?cS&m9$GR6wD zz&V~Dy~Yx%)t6H9>6a8>o=>UC6R{&)FuQz(X`FDf)>unM1%EDxyeOd7;d$WJvySt2F9b1%XY#M z52DdhQ4W$)N|~#D1#2H}1^VNjaBC+fGlw5K0^@%Ke`7=m_y6ol#f|%H_o-0KSgpw# zUq9lzXGF4NcP{WkCX6?{By_g9LEslVjDknq!3zVXfLQAcHnjGzY?o;@gSN@QUh4~3 za?g;=?CxRST_t|*sAT%JNuB0d>odDmf}_Nyk1w@Ory-I{@G4G|RnBY?X=yhpzr^(0*%aRC zno#5Lu$uL1PuSE;Yp@_lnqs4pnM%oZ{`7?h*gf?MWjM#cMVIHCx)y_{B}0hcafgrk zPq>@(a6tbtMl|8mZaU$pAk3}ykY`rMzBDX>!^+dBdMsm$Rt%~6bff;$v}K{dgXgRdL<*}UWt?$yGJkmf7Ca>IT{;Y6 zm|5azwEJ6xr;Prfx91pc!nkpuDD4Hh9jWBAJDlY-_vb!Dj$pStPr><@He5t&A=5VT z#fQ317}b9et-Sw%*_ErnWNcwSMbdoO+GQ*x@+wXmnnqUg7g>#sj$?7Z1j^YY!;}k@ zV7b8ZX>_hbFM~YlTs$39k2PS!^UM5_yrWpQ^9_-hrW{~~)8@}|El4NJZkY~Ol+O;gDys84U7d(Rs zM>&|^lL`qn|=* z!G4O9^Q7+nuCyhylh!{UO?oyPNMn~JGaOh0;ih4rJkEigIm6TMem`n_g%0eXqjk)o z!xO^0?vhPjDwWP!4FBdIf^;3B`dL4R8p1A;p6OB0{uuxt7Q}&XorR<|L=pY&88Z#J zX13y43@r411@B(wP{Vmbis$e_cMX&%+}FGZ)I^?;7xw(SZhh;uu?c3)Wg*G4TGye z5iEJ$HTGtN8tuucWkrE6MFUmsY1{n2EJ?WEvA-FE1*T*9yLMUlWBwz^_;n3>pN|3& zUF6HYD^a)gQ3x{|AQ=dijH_rD9a^{>iYK*WP+x~Zg6ua=v+ z$r}#uyUg!=od_)sG31{-nLU#%hpWM!`#LVnydD;(Ij6?dFIM1wlMQU9tDv}Z#q4c8YKb;h5V=I#>Cpe-Mt-pT`)F#>OL 
zvoe>vZ3y`6;h-qS4cO4#q+Yp|)mA35>;9d*ZRG^))iz))`z>**kU9Fda00YwkH<}G zq}Zve z%UwX>?g~6Runa!AUjav-GI+jiE9TH!_SK?*ojYcRO6SIqy!Jufey9qxG+*b8iwD8J z2?BenTmu?%@1fC$JZ#b5gKou%%u~yajr(&CFRwU2j}|sn?GH?b(3uzbrH#V5!bXpd zhL{L!;|A6>eGC1*GeCGRwD|*Z&)DzdY5cLH=lMd1dtC06D>Oo0=sSB`gO=J&G!~e7 z^7l{T&~e7xXy^NIYSD47UcZF=yVNPYxdhZJZt}g~*RYRg7Etwjq35r!Wg1t&5 zi`7&X@+(qUmvPFm>}hUI{kC(IrFE8eKGtyzjFiSsPX|`?lq0A6QqZ9{4=XmbGgWVa zL0=cdHa(ceX6QZQ`fb04-~aXSrd?KC^|wkk{rU!)om9dX^c=3)v8NZeUmbu`3-{4G zWoOo=9nCx>yP#BRBxsIor10c(d_dz5b}{Z2SxKGd)%Et{fQ83U;w9`x()!qlkVjmo z5%2~jt2x)xo*->E6Q*fz7wPRD&khdx%?%lyO+LR*l1Ho-UE1&jkIr4m7EC%yR*G5R z_eupWZnkCzCXJ%YO^KXwrXg4>4-=zq5$l?)fxlC-p}W5gHHE5B>Ee7*RJp)EcUFXZ z{qEs=scb$gt(E%-G61$yLEXfMtSuAxQwjS}>WU?u@QmU%>$LJ;1Aj5OKO4B~`Pr=2 zPzJ`lS_vOHCcLYK+=L6)@T#p2&)VmJ^I!{JvRNCwmW4v<_96J$Uz$qaYKpZUH_)KH z>FkHkP|)f)#d%nd6gZ>E~#x)X8Rz zS<0M`-((}h)S$|sgQ>I~6gXv7aD0yzrjFJoY&U}CM~fgg*oD#$Oa-N3FFCuI5K`W8 zmN|t^z&ExL&~{=0X54b%l&S_}JMD(Y0xKtZo|NMmrBy6-PXRw-i9K%Go`PSJ){*CJ zeQ3-NX9KoyY>iUwOv9iNuGd8k^bH?Gj;?4MI_g36s{EHpS z>E(}kcCo|S7qC{ye0kf>!2xN}RC;$E99ce%f8Mbk?oS>Gp1tv~XG#==o{l5c?^Cfu za1Jya@8rKNOoOzaUpYtBajQI2L zhhpgDJsp^>BzRoDl*1HrH~8~v18!RNlPyoQqrq-*R3ht35$gZg(bXaBzRhA6K7@1 zaJDYm0(bH#lQorxd4hvHB|e`y6?VWN&o6jB^a($)){TxXE1-d9T@bSZsxFKD6*poNQ5ZM+#dNlZN{}i`Wux22(R~DXsUbD0s6z z3_pF8%~{;cKlFc$Pq&J>LOnGK$mrnu3}5nJZtfFy&6ZI7X$Gww_7;zBdWcm5SL@D+ zFBtzkml--lu+MpJyiAx0G_}2Fj*bOw?R@6`=nA7rCq%AEkJx4xP1^TO z8NI6Iq3d+>#NxoKEGTR<1Y}NU8s!2zXlxPp@zxpU`0OBlGqZrJ9b2HVTmp}e-@}p8 zvmwJhmhStHhOSF$tWPFFbZGob%u-AjJWy|7zQ=1kw=x;r+*grXw;xSTm*@AGx{<9} zIW>JI`r*;Y+|y2SwLQ|}fuR#c-xUW4ERZfZY`+IQ662X?Nf7Cpu3&4PI|?I3%$XDV4_J*rF* z4;IgV6fC(~W`x(x`iV{cUZaq=$7zm?ff#EM7uxGCBu;APX(so?{>7jb`db^Op zTX%)M8qrS@J8}x!1{Tz9@t6j;dce_syIEvf66?y{4>w+ZU}L3jVDQV=4%;Qi(0+U` zQ_dHhWrotQvF^PvNC;fKtw0^Mm)UQpC-l+W1IGVZ#jY$a2C42Dl(ScgZY4RwTc-@c z)uKS7^lRa{k|!CpXW;k;wdDDI68~=cP5OQK2?WhDhZh?CK~vch7t1EH0z(;+9&5o4 z6>NoKVYj*C{BnU+mccZGFS5hNC&|FQmeu`^12MXF0q;kW=w6K4g3m;CmH)ul(uOk 
zBo`Y)PD&iQ4zPud9Tu1*Y@!B8I7qf>W-DL(M{@3$xG?!b=Jq{-`Dc4m-&le1J!KLN z`fWis*XBX&+gwOCv1QwT1l8AJ+{gNp@cL&xNZ2AaX1gsYjW)-JU(0J8 zEk>}-I-1w-(cbFn#-9Si$;5;xzM-U0v^pP;%I*hla@(@VArKJ(mxh+S@tGm zr8Avu+xxRUL4#oDS_M{nVJY7AD#JzRe~7q-(WvP0i51Epq;+?^*gIi9w%m{oTK$We ztM?(c<-q=$xR1vnzhyH96(PHHrjp6d=GeH()wnESH~aW|B1?LE7E=aGq1zx$%Gi0B zt)Fn6b-o<{YtOiV!>@N-m&_b?ahD%n=x0P%9Cp*B{b}TUNE5&NB?&uL1Mts_!$&Oz z+^LHpSYdvID{o#-%I5#L!?Whmmzr|CdvP{wI&cK*ca$?%JE>}~b?T%VmjR=~jYw2} zixl^5!$*@F@P>yP$$cJ(>cb9Gcd#e#k>bhpmfga8u1)-&KPza;*@0AZLYs;{i~vnL zAvbzRohjV6WMv7bpmpqL*3@8vJKk);%R_unwk(%7HZB*9%>IWBcS4zEXFN^nwPY>7 z5s&B#Iq-@?+-Q~qaUtcTU^|V>s1Z9->Q+-$8pp06UVT$z>(~<8o#O zNbdGOLXqLW;Z?wlnrZAKmpybnnXk)&%FA_}PQWB~Tt^B{j9-WSa`R{jjHSy9-!SX) zNBD=PkbLN678%jVo`2TB@wQr!5Nd}v(v>~}i;`6_z{VUjeRWxakr4!+KZ6ertHBLCgt{9hQdpc0(XV$k zQf@0CfBQkUX)xowv$4~x!nmMe! zfPtUPQ0CJx62*D2m;rB@TZAMoRqI4Yx3QtyKq`?dNd|Z^Yo%B^6AG7eV&Hf#5!O1k0>B!Sasx zLqA!Ly?pAxeR&xndLw%uk9ZV-^2C|A{?0cvmuY0_e|uPC;1b6jJ?F6Bx^kErrHfZ* zNVRgx_xJhn5j+NSrdosNob()`GLs&WY zw7ZHw$v@(|%xAIZ2P=8&vB>>0t0Co+v+2-OTk3M3#d>DPU~q^pvXT?s>=EvSd_2E)`d#K*{Er1!{bOy5TJZNE73yxki7tZ@*~lZ=0@J~QmJR3? 
zzE6$plui^cH-9i?x2z?LUB_8d??L8mS}D3zumyBo;>oFAnYJrcaNSw=_}jX}aQ32L zG7^Q*&4rg}s`MQuW`nN4R%29H7qU`y1n}H zlD(c)@O$0?T5dcDhMrfUk5(rn)62>!^jjlum2r!z7fFc?&CZa{?c-cUkR6kGo`-*e zGU1d@4p^1wNOn~+HXx^fMStB3i7!mWvlsTViQ}9p-AhXRtj<|rF=x=EF`qb-u_<`@ zPa#(=?j{d&Te1GNVUlI-iW2pVSUyhiKIB{9p|TNc>6Kg~=*J7^Hg>D>MKG%Pki_bIC>f3j%*j|_KTz7q>p&9=p0n_ z`@$IwQl#eD@~mCw4*mPw4jwyt=<&-~e$bZll+YAU+6p<8VI6@{hsBa7LrqcOrm=B% zO=#OLV_H8upVu_XVVBZY!$~W zS%CUVxK(@rojotZ)i4LDnYDyAe7wpB&3#8h&im0l+qd}Y?+2E+8^HA0bIM&kA5AUH z$*^6P9>i?~so%{wN4EnP>K^Vcqa0 z=(?<$7L4d)DoLkEHZuiH^*%s;xB^}LaFt$*HOR2`F{zd&u`k9ONpYgkr|G*$GdAu9 zRsCd09d?wiKbS|WohCtArY4BW)9LlO`OveG!4;EPWG(EvM}3mvx0a2RJWVL2)$Yh{ z14|)bK{%g1Jdm>+f0!vuN)UJ_&pFEwEtW0#*B0lmr&F)5^Ia`2Y#w{cY4km1 zuf`5%CNdkL^=1HU?=N&S)ZFpE&tfV{-9!#KE9vR7Tr3mV-8H*1$h&Sb|90UKjJIrI zlCe9*e+EB>o3=~o#G?aHdj2_mRab(#wWXM}R~=_hJ4jyZHq!R>o^(@7O5AYY2oI+9dHq!2Q4FQXDd)$eg>}|9?WaV z+p=#-KOy1ka(=Y5C!0HeAgi2i!|gQc#?S+|tIwUPW18AgBt2V(Me9%G-yYUt8M&8H zV{JCA>n?^8?i*WpK=8s$y$7?;9p$t_-=pJnRd}$bmznw+L4tW1JP0)AZRYt!*%+n{R8WXKpRN1^?`P+w#Mzf{K`UJY1;>K^~myI)sXccni?F1f{z zD*lh2UJfObJAvf?`@6tW*v%hRs1mYE5j9-NRpy)2%yobZ+Nkmv(icJwE2m-q{c9-r z+JCS*v4Fi0=D5T505wLc@F>(N~{G@5&-v#W)BUrv$qd=Jv?^li94b0X|= zokJTug=>~+Rq)mL3FgLEixU3($+A|;OUzo9@m1MIVl~xlT(x=`+g)$QwrqA{{u7V! z$^8=0y=*KWnlXnnf(hL3#zB(bmYS3vR>6h*BT-qbEl$4efc@44!8F$^5U5g(>4r6I zWq>91KGUV?;t%|(g$_*Ui;&;e&ft$*yud|k zWEJx-l4`&YPPZ`=evEM;r#X%=FGCsk>{TYO(L>?GH9b~h*TRKQn?YSsOX$B?vo%F!Cl?m1NRX@zz~y#s07reDm+bfDNeOJG)J#<25)XYncl!?>waPP1FR zJjtf)#hms?sGWQf{yGm}Js*T_*_bE1<^x9zd24|s8Q)-2TQd5o4u}1P0|bWDC^%l1 zjLCyifU7EIDwaB8sZW&{(_~7QEvHkDt}O59T(Y7M06LtJfh3! 
zI${v(Kl(VXc5($9{l`%6k%uO09;+mlY9#%Kfj7Rn(SsUg4ZjOfzr!+gc_US@gsF@`>mVsFapS%B+%tkCv>=kvUn z#WSJvKS7zEX{B*P??v-vxdFJ^cO3Rh6f@nDsdV4<9g59t=x~LYn>1OTosM{hn^Y=A z_1iRr?1vKMntaFS@n?BYo#V7yV6q1(u7ub2s+5xVhu1u{iroDK-uC;^OjA0V)x9tW z$BgML&QFh44E@cm(+Pzeg^J=gfqlGGMike4FBCfN?q`>)zi>ezqq&|@9o){jW-K>+ z1C@2XQ)3BG-$e!qbUho#w};4ZMvHXS;yR?+M~}$+lozpHF?3#^8YK zDB{~uaycDOYu1jX-~pY?U{)$La>?AB)OLR8bafEG8J1IJ)oBbCQPx`mY z*=+9#=RIalf^Pb~^HOpbxOHF7Fs>6xXMzNLlMW}T- zuBP*f16M)p4-t`&$_LOIob!e*l&~ zb)u)`L$K~-5^kGZf=m1!quiZ6bp66*aK0lZC@rAv6~tZ5vPBnGfpI>QiSKU)Pl9z| z?xqNCc6Tx@_iltQx^3Wmdo1k!vWe@OEo5YR@6v6bx%5Hvgs5X|0aF@ykfurw)0{K% z==H)6ucr(Yk5@1zKFk%C_UTDx$c~`5Gr#h=-C8ubxE9;xqv`o&3HWdk6fLk_k`<5B z(Wz>1_rwR*Av2ogYsyKw@5_nxW?XY zJgz`(|3oM|_8fhhD#7u5J}g~YQq?q98mdea;O2c78v0rfJ1(kF!=7N07uW=C`-d^} zz&t!PG!i!)%)>RSWa#6HUy!mihNvtY=Pi;I%e38sJH0$S8@Q33d~*y+!b4e&y)yNW zUWMgmskE{;g_KB9V&cG{Z{~h@_@$B@_RGL^ziu|+S_51DJPflh$M6lCoJ1#C4F1~l z1kPPMgfaXUEFEfsv~?m~+L**lRpnrbd@h$@B>{uy1tjfL4%;-!u`fmp8YeP&x#^EE z@`(aP>I{doM+@-bXIXCf23c{mRX+U<`AE$cqo}PankvtlgTk~PtSqDdpND9MS)lN@ zXce+R$gYf3C2?aC3zJ;pP%i>JZut>*m9S!r0r5Y*ml1FKR;RTq2OeDVNJo1?;h z)I8;lG8HIq(k_-AZVZO(JKJ%(QuJ=)GVcB`Th6I8m;dAxhj(+V*n;@?sPOahZ7M~1KpW2ZmH>svJh zHjy5=XRC^~oPEJGoRb8;m59CD^c3{QA7VCBFLR4#1d{6-E6mz{lkYcCn*x%H+27hF z_~(oVQ|%V++UKjntb!?E7;_98rn$kgrgxmbRwU<}6Ud#uFqI9sBnNt{8zFYW5!$U2 zOZVo7(RN#TdQ8vxnwvYg5J@7rG+xH$>o=L3#U<|3965Nlcpu%;Zsh|`C1LvaAA{*Ejz+S|fBb;5DP-=h@uIhuuJMU&R$edwP4 z0FB(=^5W4q1us%8uuft*SF4zs<~bJn{u}pv%vbDij%9x=&f?)IAvOCYQEc*sr&NDC z0@{5;pxQi+0xwRWabE+;-Y6R$rl#P!npk{%K?SwE4`8nUy_!`P1(>iV7Q!E!(h!q) zn&X$t2KFXnrcWn7RXDeIWC#w)F`4}RUkS{*@Ca`k{0cV;dEH0fS{Yk9kaB{T>-#Q>TB8H$^6wbYy5lp^;<&nS$LPyiOJ1;ruD4j#uK6tBhdcHu z+0sHY6Z~6W09_#faf99HM&o8lM%|qmu14JqAqmE?` zlFC;({95lCHtBO2nwT7fZO0RNt0o<0F$};wM~bB{Ri%2xE-pq|c$c@Y!pox%)qISX zBlG3qyxWIFJXJmx^rsJ`3m@;XnGyX-p$+k8S}NM7&ERVVFIe6Fv% z_uu3PZg1qrR514V_5;rGa5{PPNOQBencQoYa<*&h2*zwSp^^VNkh&4ebTh{=d%58_ z&qSN~To!&SPaC4^n@Qweb&cDzx|)CJBuAbp9n^C2mqYr}C-}o~7Jnxy9WsYyvgJ)D 
z*=t!HI+3hMg?CC=k^emQpKc#>^H7!aM!Ud|@2AjB`X{_})`CZWhhS;sGdAY4ENf9r zWxB;>INs$j_vC;FOY|E9>ji&%aEuf6r23L%=m+NiE1CxW`OEh>S@SaE?C6M&F3GM* zfD``fIj6D|K3H2{@FeC>2w@Z0qwU4qUZK~q(&{?rD6kYFLacH20}EbdX+M7N!Q*%| zMT2aV)g@&LpZS>rr$Ob^Af{LRiq+f1h}w%M;gEJOoV6yBe3CP{SHrL1M4gpbp{$Cv zF2`y#u8qO(fy#h`yV=FDQ`k<~Fmx9(UxlCluvhZ-ObuMBpF?BKg^6AG@WLEQe<~*C z^#K3-FpXxnx8Oacu{B=u6Cu5DKj*pW2z-2X2(*SdL7taCg2sQ$Xt5nAI*!EBZB@Lv zZU*D8wX(r2PE0oUhp06vm$|-6rey`WcJTdG%b`(|2 z%ONp&2TC`VvFjT>F=>pz=KA~>*T0U&nk5V9zp$lzXLb^+8#ahWT*-g~o$ajlzXE3S z{TIwP*2PQr%h`%GK@{?H26(rq(B$4-@GaVz&Fag94UGdJXmbSm1vNo-`PG_|0x$Ba zI)M-71ahsnykYA41bVi>iC(;pWCq*(xv1L@_^|cES*pP>aQrk7Q!Je6?o(^7=TkWJ z3k;!v#siQc@G?)Ot4fOheu85w_Q8zt3n_KOALf*D7&nwBvfTaepz!WE-db5tT;g5| z;nM9a!e7EPdx>r)chay_70E_r1xPJj!~c_>3z_l>utv(9>a8bnxvq1eq+%Yt4?9k4 z7Q9CBsQI*ahYECeU7(^PAL07Z&ozF}60ZkXb`H%w<0S>KK25#vo?uxQTk#>|&4q7JzK$Yqq^P9I95n z!4nm;;9yZZU(|7t<~WZ2M#wQX$wl5a3u#)%Xs2

Jbo~0y36oal zV{%*=yB1XdUf&~N;r*|o%*oT~km)z-%uc6m**cQ+H_y4o#8&fe^7kIvohR31}o);&RC{sc|4mUIYdkoYnywC_kkA}1Vvh|h4!2>Oz#E&+FC%r25>g88#p7+MeB6`0)Lxl`P3lv` z6<$Z#)dMNCS~d&a?GEDmfq!wS)E)eCH3@?MU7%Wb4Gfu}A- zxV=q@I8d0kT1J26$|eXo^>H28ZwjK+pBrqM=`Mcckk4q_R)z|b#!+~pF*xlxA-Kg_ zC~TnzU4GR8zGrXo<2~n#HlNGJn1{8rV}T}IY}rN;kB8B|+xqx#WgV3_s8P$j*ZdpB zYXE)<=v@Cw*sVLz36*G?CB22;|16mwke7?=M}NV9_(HV0xtS?$bApBJBYysP4vq~A zBkzL8Ec}Wp8?eKJ#+kLV%sZdB*!;o#lt+V@gLEz}D?iQxOBa(4KL#8oCBQQeYe-wX z7XzO*)!3zsVH4BS*q%4er00`GMqlFDfX*oP_U|#gXdA=Zto?*jxBcKE3Lavp!1gWB z&=!hr$zV~^pye}njOiuDXGwzeMv0yQWCsm z#dvqnIrc-@8tn2_VA+ZfIQqqEcqiV?st^3c!H;j?%}Wtz+Pa7BiTlc<)(Q4_{0(+< z!9u1HlgbvQ$-t~xwXC4P2zRgjO&XS=tYK^&Zn|W{RHMXfkk@5?^@g>~W?vKyb2-M; znjZ0!4Uci157x8myA?@5LKZgN0}PySpy{i`Lzzq)(f$_P|ky-+mmq zm!-^H)Xb7cyu*{#Ls?$V2>~cp!bA@O;7Y9reH7d*)~3;NOIIisdQGIB$XHZ; z=}ao`F0%VaOIe-TZ9e?_T#Pwt2n`nckSCSKrl!5BQ8}ytTl>kth0H}P{`wGbvwY28 ziYZ_}`;TfH;n@z7Z1upm(;r{N{3CJ|P4u9>31B;tLv4U@S#{4+k z<)MtOcd~(P^F^u2gJ_)XP<-I@1SE&eSZEt#zjnXpRY{MvwzuN#veWoOVAv`t45m>V z4{-fDx3UX%=UK()c$BZ*#*`j@Ws814VN;Zr;-C)UnVD5t6F#ShzqU3K{mfK3_wqnm zJYBexjg@CY!%Fn**E726^@#PWe1g$2yEjUtk1d!4=S&wQ9+|?KI(kHUkfX^sv|>5e~2yr^|X*@d$fsNc|HQ3zEPzp z>lmi-_CG4h-^)A{=7QnplbnXq712WnbDa3?9lNQZUvoe$l>hN4gF3e>vxJvrf@7>0 zPMc&x;LK4Flvab!HbRU^Ff+3A1l2*qFxPzn1QaO1%8WAtV=n`z`xkMRZ|#Khb8^k8uiJ6) znW^AqUqmJvtym_T#xFA(O$+oVvxf&x3ZB9U%4(Yj|0%e0o2TTFqpLN16x)*l&SaX) z2chk_QWCw}4&x`a;u@!=uw3C1%C8;8+ddl%Z|Wv<+}j>dpg5mp89-L*~u~g z{%_g+YQjUB_2`n($Il8BxN>%;6g|5i-&%HnJ-^Wp6kf&C(X^$|<8T#S>au8j={?fl zX(V3YXGOzIY^iXEEbQ?=3~M?ZVTY)mR~aOn+YYs3*eGrGW#?b~@-6_rt5)GmB^%CX zcPvyajbVLq((HB5FMRNO2t7?WBno`$N{Uy+z^D7Mo4Rgz!6bxLKAA|r!GdgW52D9Y zWO-ZFQK;S+#GI1^#@XyY!nx7|I~;^={Dc8;-SHzzukd54AEtrYcLQGM!49_hmI3@+ zph<6r=+K6A54bt!JU4!bnk0SmZ*uE@j-73DY z|GEA%PF_=wTYQ@^pz}KD{#Xq8>vd??B!OS=x)7#E%Swvp|6|LqxRCUdMYLECcz=9I z`|E;5GpZUv^XYkLjR_)~pn>Gm-XyRnbkT5IA^uY1=(h1Y>{_;irrh%-%j!TFQvHyM zo;$tRiT^PYuWCG@J*|J)y-!zguDv3@ zjvNisugHSQNik)uj0b&PFCM==NuWhcQ*o8kIJmxxc)Qy6JQPo 
z5j>P%O(T=s*I?iKj*e~K28Rx$u&%o@u;{^i-237RpIx&LQ>@-#=K^1tIXaK_#l^r3 z-Tq{0kO)hSl)+wJjg%%!v-;L`xIq6QKT*iW-c}na8E+;_`xMv1oKR)9v3nX*ozl&Z za7tncwId)lP9OgG9HP%{H*0(*YqP!fFW8&)AF(LX2p7iruwcD`R6Y6=`}Vel9sZHT zwNDZjn>$|M(3^(b5%VT?vv3s^3LV9>b8R8}$#l4LpM%+XVW<-tN@s4gV|Y$Fm%rQ! zGis%&zEj9%xs7Mbhug!Q00aJ0)O}Hw#GICnDPb>L-!hNP>6rDk3qQU+%U^vVINT{WD3OTHKaSrdUCkxY69q>_Z z6+SH+&9vG&*y;I$(Kw?-N}^w2=aDVoa`7hXoA4jxG?+r%k~Nh2J`v13>qNJXy`i^imxP|? zC^mVN0sAdCuDa9JiY=-N;Z2_GNB8Inl<>=&1;@|gwjb%lwufgy&fm6Xqs34XPaa5T zs{U~+1z70A8|NUtL;1HM-ex0MJE1=iU8$FE?5*U_z9OKprierniWIF^I3J8 zaOqDJ%}F=lciUC7!jD^Em4+4>EWgWU{>h*Q<6QP+)F4otK8(AYuw2w)R?nSqwx=^Y zU1(*A2L&1F!i&D)RJ-Ocg}FZA5=#0*T+?IJZ*5_ffqDX`>jIvd;Y5GuFQD??SU6KT zAM55xVB69_>Jjeih8+lml<7zDPg597>8)k07qa2zEjOlArwwD)YE$-?bgWuki10TS zBmOKG8QkB4;dXf-9wu@rpaHs8Qig>a5AiKJ0DqbFcndOda#DNO; zpmg|qT;_9v^XtCF54W$DiyaLa>D%NP1Kmm~4Dfi^$( zy*De&@T5n>P2fpwCl~%=9c(>OK(jSJ<7T%N{1|0>dTygkMPu*c+T3#dUEzv-n}BXe{ovEA(wJFL5P?Z(@6gm7rD7W+vnF72|I8)>tb% z=51On>Ct&hIK3c3_#LyxonE)N0SCfCDd0NGUml8*&whMhQ!T5pe#4G$Tg=d4An2TT zL#I(SY*A+@oVrmAYqQJ&yvD)(NeigHMV%8;h@yS**%*7ZlwDE(f^!bUveN4#sU&M9 zJo(z6DQXA8Mj0P!zv7LJ*Hf|ZnGd{cG>5qciZE-BlIY)-4m7nJOeu9n@a9)CRcyS$ zoCY3*ir}5Fv{{dhT%iEY@6K}8Hwtl&>ISwd`Xzo*-a(r(i<$n|pF%(4ENQQ=W4hZ6 z;ac@WKJ>mVe01*>x-k}P>85_=F9z4SL6ozimA}_B3X6~L!)v>TgNls`*}PJaDCLHd zaISw90ALf_b&;#>5&GU-x_IsSn{Rv(1%^L4n$-(fU-e?0HiyPx_^{=y7%)giU&HG3{SPvm<(2f7X% zgG#UA=+^dzJACFNZ+^BvtTS-9fr2%D~5@Ey4>->`Q z8I*EG6HF`o!NAX#K97<>{M}EEO2;PRyqnVElCfrzIUTC-;jb*YOi2;3QXN<{!i1(S zl)#9J+ks0c@>bgpcPO z0Ev=O^)vOathaMIwaqD{`@$Q1@9<)BkW@H2OU8iOuHCF@i#!|k_$24&Kb|ezh1`^; z8KUXaw>S>JV8~vTyv8WQO!_;ckeXs-`Z$CPfL5dp?a@8=XiWqe}4F%>ru6*Js^lukv4XLvcsJbarAv6#n+iL*r?L>9TUn z_wgN;Vd=<=cRH|9t5>p{;WaFKkQv-6sG$7*muP8Qa`iL)M1EdR2Q+sZl2`x?bjBTn z)*?f=mve*ta|^_9*=MZd^GQ~0CMEf#*93t>8exq|07h8~b8EH+9bLn752roB4b{i+ zk@pLjzibZ;OV|bvGoG^hm(Q`jTLM?UTBF9w?lNo5)`P%IUwSyvnr`bnK?4OZru=aj zU*&d@_kNj;5zSBWYU+G4Tatpln`&!}bfl>-SP{R^H{k0XQ*cpGgTtdw2jB)9gYh#> z=;;zEJf^#hq|eFWBz+DdMaq&VmUiO)K8i3zO&(564yT7l!l2dr3U6(8knLB|qm5^) 
z1eaqdG`OcS|K~0w_;**MG7iPlZDN4AzEFUz6FM!vxWN`HBSXziP;pAF(-1p!W zdzrA3$uI6l3aMw=rd8wN-Jg@-B~^>9Tf9+mXcGE1h0yz3qoLr3@DCT{)`LeXqwnPN z?EBBVkbO==TrKooOy!qx<3*W_*S3-zyb%Le%H%1%XE(QH=jECTfsH&%a}l^_$g?|Y z2az{crjsIL^!VMwS_bz}OV(Ysvb2uXEztzA#&67g)DI4s_GivFi0eEkP5-^?`yWN; z9Z=){#qowTNF}4JhLnmZQPgwJMP$oJQRGW@c9E3^(vCFAEGaEXr2Cw6jYL*5Bc+V& z5!vbY{Qj*!x~=>9JmSElhBnJUyL_2yh9KT5dlf|D(l!gOgUxNH77(Isypj_&2bGNr{F z+-)noUe!*M#1m zJ@In(Qrts{Xy+d;D7%>R`n~>q<-Ww0yR(O$eSHK!yRQa|ZBqX3%M8g~w?YV**9A5# zs1QFb4Tn>&Qk~!5?JoW;9!$EmbHMYGnnEM03%8n%!SNPrsZ*5^wtRmN$F`dZ-qMaG z&iy90j_@Hf!yIApNB%D8vA1;nQ?8hB;NiL}g<$`ZX102_W#k)A0l#ll0wHMT7 zxhu}$+7SXCO;A-FY0j4ROG}-H?&z#2_z_I55jQ|Fpc|SVF-EtlCK_vChyJa_@`5Ko zE0a5t`oNF$Wb9#Hc}GKGWqSfcPegIg0atjRm*mR0IT=1bai@_NBE@W$iPT3K=iMay>|9Q3cp3uYG3j1 z)Q{kzyPb8UbL7kiT`INh4RLn)+?;Bp*p<>w_Vjai+!3kD=3Vzgx9hQ3DYrx0&9<=k z#cEzYlfdNYAqsVjB7?E>>BWm=93=VVvVz9&n!5S&ZTdgxWUCRsv$~Hx9h-Q`ty1o0 zeS!lV2cg%0w`lW1=^kWU$BT=W;(=i%;@YqpRvI@1ODBI5-y zuL4sk`xF#7t1~0YP?oF&;IZ3FC+U<8_|Vd}pf`T&S7O zYcD>***)%w3G)VX(vcpl^I#A9KOc*^UQ#xDuNq-Ui+HzW8m{pR=OND|yiQ{Tw!fG` z2Zrq7YRfZ}HC~G^_H7be_hyPK*0xjNqinJ2b9Z6Qf!E^8yALGB$VR*}Y9(6wL~vWo za%#S4M`KcBxM}26u_pbS(^c1A-06H4BuU8NEXlQJUy!;f+1nz=Z^^eJuL zR4d+W(wCi2(Z~DUJ5$O{4aj=o1j!E@1%s@6;>3v`V7QGmL-I?84H`aTai%VYp1Omf zJyJpQ-VwBz9)bTpH$&vP17Iecqm2uO@{RS+;Py`yuHSNl=DFkxHEhb`-(7R^8{&dX z-UqX;olPm?lxLCn zrpyiFJqsbB#h)^p+r-7&o;n+^zD&l!{osx`3br=5h_indfVH`%(AaY?oYIJe*6=-) zw#S?`Hhtl(|Lte(`r|y$_aye&auoY?ScGmTj*JyM`;VMb{8K!otqau{44#^?HLqOL=z^S}0m(oSg_cec3(cQ?Nl zuDM3g`IN`vx3A$aHlP%XJC<_Pe*qw1ie&t~Z8nD_x{w-$Q_RN_T=aAWEQ&Kj&&U&$y*q~rUbyq2f%aH9{I#H})m;=Y z1RTa{!Pi~|yh7EGkJ@Ak%NoOJQKKBLD>05%SL2(Ff8o7hBBbee7bH@)!6W&ZmWH6Iu@&^UNDidsCSaRUjoGF$ytH=#%EH{i zZuLAE)_fh!4gBSk>V8nc5Nn=4qz>x+X42@)c>W%8Uz|0-Mo{_`3hTp_(c?-gnaW>K ze!abz3(2U_xgJjIO~mP5H#zL#9bWeDHoWM19A`;;jM}S5p;{+ahz!0>%Kz{E-X6#A zHD+?t2R{rPXvFI`=bJU$$!5Pw%oGn8^8bhhk(y`@~V_tEp@Zd?RwR{F8bcQ~7HSCqL;W1+_c zr&kurm{r%8XIzTGhz$>L^(k+R{8K2@bO;b{6-j8D`?|79$zeVD-eUUsGFM{e^}*f~ 
zPKxpCa%n(W0jamvP{6|%&`X*(KKICz|GWK5%+!@~WN&uK2YCm>=h4ODe?=W}@s1OC z%WEn6^?db=f&E~wqT3vG3dGRm%9Py6p4c6YI!5LbE@ul8PZ0lG=mxnb- zKCwqabKz}pKktaIHRsTE*U7AAu8PHu1No8aTH(|5A!6yKZ22RTKr!I)IQVV$7^o(M zygs>b(3uWm{?p-D9eW2`Jy$Rf+l$8~Zf(ta5q4dRhnQ*`@C(x5{LM+UF|dHz+^U3` zRu(iu<&s!j8i&)5r10MFJGpsKm=IKPM0io#OldPdON`J=SQ>ZNS-X1{tgbl%=ifBK zy;sj5aOn+nupP-t_G~6-_n5`PK@q{@{v=wU1yxuOWPB@_&McQX|b@b^=C9 z`_EwQC7iypl9lGyab2(#76)#@FMlRD_Z##b?NZA@S7{2SNAKieeOrnCJ`|%>!f9>B z3?7~5p;*6v5?$HQ32&a6gfq*-@IgD2bx$45D;Iag7&k3mk=Bn?XBUcPzYGMQ-cRK% zFRHkoW32pM*nIf)p_X?1&7}@E|G}NJbJ*wYCiF4TfP!U#GQ(B5;!J%*-u)nk&q=*; zjZSabwN*pWyuC@7klL9aCdHs<+Dm!JfF@p9yo3Lql~|Db+Hs%t#tNHfOW|M3WB%o! z&3}wD$XVKB=i0QR*Z#+7xyN^QA8{Jsc+jptxUvJ!Y?=K8F0IV_85xYe^6oTR> zV3J_X!Nu*_)$s*5yzj~t23s-Wup?gYkRqLrv#B!2nb*8>=9}_ly1p@(roZkWo4M*C znl5ev>WgojkbP)zt`@-_{}V-NR=0Zlf1^SwLI9H zMQ?oHVIPH!b)kgTIhc}t2Y>B}XUjtAPE~&ePM|iF`kvyAe{SJN)nTar@+4mGsIG8- zz7OZ!NJQ0zajdU44!?~J!&Ji=xNu*K+~oCUNKomH=L4S$W3@&KL5^=Z+oA?OzL?3U z-xkr+gb7%6B%b}>Npt1t#p0<+%|c}C3{X?ZVfPXlYiinXkBz<1A>lBWdr4fhPjgYI zNW<%8ouJuPO{{V<78jMcP@mmB=;^i-@W^xn;q0ZP68s*jr;P!(iV$%@coq!bk}Oa+ zBYZpA1P}c_B-VDR5`NCu0{%5?;QXwYsFtoR#tcu#%omgBagmZN?MNY>v5tXx*8REt z%=PR#`yyX^Q_60g$KuSQEqvvIFZS^s%OxI4wA!>cO`YRG8*08mVmAlg_%2O|dUgit zpDqJc<8m&adzFI5DsiW#1LU>023~)uhw9PNj9c@LFv2yQ(z{$f5*!ts@6x{z>2M{@D13$I_53*MP3tU9!(XzAFUvlgzVuCJouvGEEXkk=V! zM-Sv#Ru5=czb}+iumpC^UWi8rrwL{`!4y(hD_%d_ftQ|*=HEZ14(6JD|5rWoqvx}E z$nZqr$u<^BC1z)8<084E<|3M?-HCnIPQu`elZA6thxyI442m3P%!M;M8n*KrSrrt}_A&Z!?bXBcpLU1f(}Aba(c6n@LQzQliFyx>LXbA zAe_F5s^X6~W@zxLH(oxmM}AkjTQ&uqgAemsX`TC1=a?i<{wpLRf1kn+R=;H3O_o&u zWTMdf!75Uy8?gB#T{71_fYUAep-!JR@ExtM$f>^0pJqpZ@BBy_0EdLzDTm3&GzT`l z%n}o9bD_PfJ*7>)gm<@E@wkR=WHS3aSx;yLzkiO9Q{zUyYk;>}rOAJ8h!ikZn|%_V z;0cq?+{r(kigquB<37r0AZr6-yN-M)xd&7Oj98N_WkgI9FZ@v8B8 zx;5QUsJpvc@qae&wjR?l-^B}W)|z0nRV+NO?Sy}`f-&K@D$Xq1OD_+^(;$yy5_YFO zE-}gE_XhT0VWxy}Hr`xo9N{#`TZA5{205njtX%M!WNaq*=(Hk5g$! 
zc_7WLn~Y7DTcJP}4F;}vx$~AbGD)f-HxGaC9JmTP^;1Xpje-14S15}KlFpCi%Q<(} z3HKuBQ($843Dk;Mh!-*$_muCHCe&F%zRyz{+jNN+EgAtkUd_UQ(FM3-g*6H} zFKO!7Afe~c2UPs77guD|QuC29;^eySeDc~-JpOwEN7}R#LwlVD*WTwr|7RagJ8h3K zGjmYo;9kk^ejWQQT8`K2@6*<&J}95zE;!{sfq?^BDLtb}NGo5&()ozm`AS$h_HIh8%K-PpC_HxN8{JYnhJ)HY0UM1C;IM5VtKDtD(I?)4=$y=n&o|(m zM`2*)(-Y&0Z+2qI|i%akJ^vWTDF}FbzRs^Lvmyu`o$)XDIZG&cNS;Ftvi0ufJ#^X zynPo1W`oRK;%0vu;>AmcCErj^sg7XMepXB zPGN$Ya|YJE8-)9A7PDHmoc`#i(2A{6_iue?#jOF7@9O9q*zsiwYxfD}7Qb0|!KOR4 z$xo2)Q*Z8FX%B9x=g~eafjjQC=QzL7SoEY9=4qW4I@PVfHf4K7?WYm^rOk=E>ZS^d z7Ej{swIy)rzcO0$doIr%*^i4CnJBb7zD3CCB|E3{42K%5qwron&h<*JJfl7sb#%)h z>(P76e(y~V_UY*Tw+AoV=EL%>dpJb8)-DP!rpUIjXtBeByLT`X_DMM)yC6%}KXRTA zsobYGikITn6T#R|V?6&I*aSOkYlVb~-f(H!J9=n$*m+%FEycO<$rScO@=t~yVD)X= zp-+4i4~|hsv$$pqs&(TI>iU$cX3A#SUZhku4eO!}6nYj9;q?q(_6k@IMTdRPdsz*_ zEa$a6y6!j4x+ie`EGOY@X)})O7R~FFrz`GwXA4t9Qt9GxHJtLzlqO&$y_oeD2G91w zu%=^R`e`A#4~@e6y_fL1qFM}Xx0H4UEQP^gDIC9im|~d4btqk%4Og_|@N4h=xTt9z zf5>Hid@q?D`~z7buEpx?e4HXL#PL6OkjKdq*gaXFk32dLYX=q3HkXTVY}Rh7cvC{B zVwcdT8wbH|vAz)0do)eo7eHSNV!8bBI#{tX2Ir4HBF~q4e|82_ILjqU9`XGoEUaxu z&Vivcs*3}i_}&-qPs_B%kE$LpM7UoS1qu#gcDWdqp`OS?T zzhF)|GZJ_tPhOl%mXsjG|~sk8Sj1XD0YuY#_Z2HzBY%i4_BDpnuUp zp0J`Tc0HwyZ*%tu`|<-o-zQ2)lza2BrHPw`;t`)vXGmrmn|Aa?w30yp9 z9Q)2q5I_FxObZ%gX?brmXkESlJ~WIFj5c2uGY%hx=a=Kc2&r4P-Zy zP3Cd2Pm-A|KG=i%mss%Fl!t8Axe@H+U*e(0Rpg+kq3UxZ`J=@nxYrsfn3{LRe;t%? 
zLWmQNKDdW>e)=nH8QB>Q6h9R@_t?Wf#+6Cg1zQkRdeE_DPsNLS8tK~Lxm^CY3x4Tw zh&^Yt(7!I;!q5m?Z2fqKCNCIBZ~Ffh;*BC$v+GY7a9N$a{_O+JbL&_uU?|`5FQc~b zofszX!?wOTG<&KAe#z2B_v&cscOX`qthCP*#Eaa%#!!!4G!0!`a>g36h+7%vH}nK zi5yzr%DsnA;-6Yi;i9q}y4yd1-VvQ>^|&6K_s5kr>>6mdsvn5!mcrvF`^8PK6LFA3 zFy2eelXyDg#3{vYI92tL5YcFY!$m-~{e$u9)=Dv}Za)UtNDQ14mN+5(tnkIGPU!Hl zjMgi(@S9nlxMiNPOy!v`gsy88hfYgDI&xKRkZ6SWJ*526h|S!2eE@#f9td{nr@+rY zNbDeW@lPk^8=XheyYO(m63HiIYBdLF?_i zuI$E6jr{(I^Bd^a6c;z#4CQ>UG$Bt8KZwX58HbrJ9C zI0n?WWPz&0d(#@>1V)Qj;cd8r+55K2Z0DR|cvS=X)?HY2MX>0-KonL>EMfNx{&>|o z4Qrq8Nqi>%f5V!jPMAk3I1Bo|a-kApO zfAbPpOWv^E$qs;CN14tV!`)Nr1=3WYS%abtBI{;U@yYTZ@Z{bqZ zeDs|E3X9Hl;a+~H$z`8(Pg`(K>=tlPV(%Xn4(P|=fO}~e#9?^gN&uv(#>3&`0VrF$ zPu%x;GL^;`!xr0J==ymi7ltQNIqaZT-F4`a(hnC!t>&j7j9WJQ)THC%ECcF;u)r!3~pFP|cL3SijgAx3t`XLhn4- zv{>S}X^*15t&`B@g$v%;dIJ_;%7KB)|H9xoS~3m6TOl>-kkk4pK6G+p;Qp z(v3royeGGkCT_3MOvcl!xVTQ2=VWHFPmmhV5nhq<#7XkIrFyc@q$OLwq=sUpXY_$@ zpE$S6u;;6f^`ZZnhje)FSyml!0Y?rTitf8_kfWEn;?B-=yl&A~u_2`wUv5~AI}Xi8 zwYVLaQPo*7!sej(ad|P+O|};|$0yPUDU-fmx(8@CX7IkUZTS0CHBL{^mbJ|Dhf!l! 
zN#3x*+}$gScK)!S4Ot^#WnW{?hqZiZ-84?S<$&8u?C^%fkP0gHrvsa^q4SxE{5Bv0 zyANB<{f3lb;V6B+s=Xflge>@Xr-=@9+)ZwKbNKUd%=w$KqDy z(R@S4oT77gpr_?FWXBb_VX7-`ubPFcyd9R3L84c_JKWlxEGh4XD&GOteco zfqI2Og5I9ZY;!1xX75m9C#e^ATr&uIT84sE&s(Bpzc5-kyjmbPkrvJ$%`22#xNb(7 z@WF2leGQP@nq8)|S;T5_rZquDNGrY6T?}nLJNV@0P1O2Sj}0HMlOK|eLyw+&NwYy> z{rB%ozwD>+PUZi=?;bgM@g{LWsS387dIDXRrph1duM-Y7^~Cfit7!XO2WUJKN5>Xv ziov_0gtv8rc}DL9I9d|!e5fV^Z2sDDXBQvZyHgG4P!ua0E(gaqTX}I*Ht2=8L$l{X zP<=lK%Ad#4Nlyh0oc)5WwTH>x+FYV}o>sDnj``qfkciu)8AHLf_6j2%qFP^dJ`(Vg zZOl@@@u@GGe0v4_s8vu(j-s{^ePHdO-WWe^DF1GXqrQE%VM}xZ{P_?pocg_yj?|j7 z^1gbCZv7#ICwZWg)dy$CiG49M-(BocKc1b;U&~K+@`v^7=ZJGU`wJCPjQt6I#0@)ivELC>CQ$C zl6Ckcqq`{jSlFrJjSzTgJ1xr5AU3p(*h`=CWNwyr^kE0d4-I3cK@MxmUkG z;$PpzFlFjlSlc>9SiiV8bxX0M631f5y4jcJ)Grl=Syxi8B16U8heH+P+J}PIH63^; zu@>WIYw{bPYG`V=(Agp8usrW{CQUvVg*S?FF#l~iq%ChF(s0JJKXvfswOD!Zk=ydB zGi7AH&R5#;nSrHqd(bSg!7A?)G`^f*znHt2Tib!RTN$Fko@zQaYnm85 zHA4cFIP>?s8?^C(8ZY>Gjb2&)gFah2a68Y_bh#$Xd5ZpUyz1~B;vc*7n(r^+hm$G& zQ}>{f{l$1(ae+&Bbmd)2A(YUj&26>U$)?qnd%jJ>-vjyZ^qDVS&o@eqb@6Aae@cQ z-Busw!Rb*by!lEmzDnoblYaQpvKP1C_n2lzZI{QXcHp0#9JxbD8XefXgl>ITh3k(x z^Ows9h47b7oEo^0R;opF>g*XHt*hwMoPi(^GJKVYtCSF zdpdnE9EWrN9A@48_KGtt4tWy!hF<|G?g}D@8t3LyZNO z$nyC^x_Y<0VyHA__R4sLcf0E2hd3jwadKtyZgH-jGl#dNnR8=cD^4;Lz}s;IEBBp7 zh6{rEVscO6_?slWcKiiu$?UPSY!Z%h?!_7@0b<|d-DT5)!g1QHD74vUNE^bJ;R3}; z>{v5JoLRD*f|L8BZ^23$+FzMe9FlRUUk~nhE{JbpVodpP6g$?tIBR8MxFdJMC);SWu1whYpYKcm!0A^v7E& z`qVp5lU=Ty$J_d|smqii+L_&x^eG*Il(zc z2PwvM8;#!UPe2LG;2N6|u<&1RVSZf#s#ag%Gf&K9u6weu&%B%5^Q-|J$TsBldqPuE8C8{baXx+40q}Rq`QjU2sxI6NO*q3_h{1i1cHk;q0kzFzjx+^!Xj!-+C+^ z+c--YXp)3+nLgZ3ntvKw45q6ew~^6?COXzN1h3cVunG<2lz>W@{q_~Lm?gqohhnUp z`GGw3964OSO-Qe}j+zz|s7T7%7e-G;=NIavIo}W)TZ$mZ@FD7S{R^jNDe#8cQ`i#w zf{&L!qc!G{Tppf7op+34k2Zfkf29kizscYgnZHCAk4>~UUYTQZrz&veJ|WOVZNclLK!!5#2b zi3`JkM>I0GJ3Rawfir3rioM6xi2GYjc(wc;={z6G_u74iwz7BPwsFQ(*}0hx*~@9l zq}?1B9m&lb)@C!0jbQ#)l^rY-*i~+xxh2i1fG8 zHcSR_&WqvFvEPD`|8cgGyoguwc4B^RI&5mZ4QJ&S;eAmmdB0i@2fH69|KdHo@WgVY 
z=WW87uBu}B`#<6;b62*TbWE_1en)m87G#o=$C0^Gf7MbKPpn=Diks<>TNFtaYk$Fj z<2j(;wvaFS+Td3-MCTvTU^-p$!A8&G2eYe%l(wbxcz--q&J)l~9wYd=g^9nmoP}D+ zt+1xf3O8pMkg>NBTnt-|EALi|13zbp&mFX3;GrSd_~aCgjbF<<_En36I^LvR-*?gj z#dV4@n~RJ63G8!kL+wc|VO3rtj(RH1EK>Wj_OVZ}$gmKP?n@7p^`Nrp5xge&5U#p*kH1)jQSt~S*)Xl1>|UsfNsbk`(RLwh=+uwr`x+=-oK8jM z96`7i^paL+$?>sUB{qCkrH1$ts1MYq;T}?7eAQXdk{_fcTNA_&?!J8cd^Q*x<>K_( zJi*=Chh`OMV4{yQTOOJwDiyopwT@e9slPNg=r|6-=A5DY3VCTn*QLDZ|2cZ?XPVxVN72^JxUKZwv`G2Iw_!ZF-yMpRZmD< z^j)~-|4j69oG4B@+Kw;WuOU6@^)8+#`85(x!nC>1u{paNHhD@+ttm?Mz^{O(?+74| zK^o9YNfQ^y&SI@)DW8083+sFE5ysU#RWm+nt8+Sffso8>#|M4EzZ( zAKIa%pCMPYS#V>>PjZM$qki9P_+WDb`5ROat@DceXtW;N7?d7+aSF2 zI15jI-9jz*QfMt6p(zuFaE0z5)X=nqwyTEpv6~xRF6aUA&%4oyZ*R%Ks8VoDse{GP zi@+%b9F}hrmmVtRypKEKN^OPwqfRhAm?+W!^*osWCxn*UjUt=sN`Io zpo^{%pBg9i;)DiReR=@3FUfG$tlWVX&3)LxQ^ZweM2 zwsQEYIh?RBT#OK^;ZtrJbbQ{63r3pLIm>1I-2NFoEEvUyYTTR-?Jhk3juzpTGG{(A z#{oT-yd@4;K@SJjK)Uq=Ea`U>Mhz+z0#!eYo+aPJOy%Bu`kpPVU*-dr11>|#)xjJ& zGC=Ydg$Zkq?4|3o4q^F*PHevK2*)HC)0}G;@O(!jTHor$!@J68`j}LFEcG_qJ0@|~ zh7p*4;w~9hH}X59qtxw}24_ZlaPsPG{A6)Se4TKfMr}nLXx$%HUOh=Ic8;7szYKeM z*OL0)Ef9Mv34K>jkllEhMOP{}g0@93raQLySzBT{dkW&cQ%1ZmFAWTb^vA1S71-{w z9qUb8!Is5_oc2PAKeXNA13wO-e)(<;?^lj5-(BL^Iq%U-`v2-b)QWrOuMmIj>V|i} zpK;F8%qN=Nj@Np|VbwNmP`)=yw%7SKUa*Mc)23F`W7;ve^Po3&%c^(!(5nJsl79#r zFB&KYRS=l|?0^NP+MNC968JZ7A@$eQP$RLwpK}2}Fjc{AC!=6{nGxgpP^@yhK)0m) z{@{wUV#n+?{7S45HmaZGOAAbR@uGH$u_Iz>+rS8VYp_g;$)@q313|pD(_LKPx=HL2 z`j8`fdvfLMgYGJkS$cAB|!c*RF!D)B{+a zm4xSy+v5aM!7jhsal*|^{!cZHXZYR{-}aBkcRm?w%Xeg5Q8%(H3@8|bJD1$&Z7y*vPn^Y{dbO812U>G+$Satv z{uflfs8PeJSET5pMX&Ff;&kc%JMH&bIMVepC!RYk)IRjXefQj1EjN$H{kMA3jmXExsgKo(Kpvd+hOt8$OJHwaIlu-|btF~tHE$ephga1;|KX@n}F-nB^mG&^V zXC(R#X%)s>xYC@MXH>U!A6A|@!Bv%mK~>5XCRyJCx!xu5LP0)R{TGWKlV8HEg$F>} z#8mv4W}rCrs~bQ26o}t!0H1xyDqYGBb0Y5#=iF=@R+j|c5$n6R#+?H3y_Y&n8rG9X58Ma(fkVVkoksGjw|~iMc_@@*?WOm9cEh^r zSmfpQvU8hbF|$}%_C)du=FFCMsJ88><%1fZ-4Y73+Eu}$i-&l1a4x@?q|0t?+CoOi zC@ilJ#q`bNFmqih*j^flAx(`!zq5|m<$3`RT+spMs_qwl_unT@h&)V1CzRRu#al6U 
zW*SWDYK#AM(cys|-RSwo-EekmoKVx;MO0m)%Y{G9xL}eCW~_ZjjxCpj##2EYn^Y+p zEgL|e)4u|f4tJM0dDo7VgWI%pbntG6&p$+?*Uol|OZ~^-i{v?EG5x$aZU#Y>-)0&d z@mVLBIz9dIO_?z+k(e@KZE8=1FUL^*D2dlpT~}wqvdHf+^xz8)ZwllC z$=VW+@Edgc*pX+r3}IQg!z^n_B3+=ANb|`%b8z1}=FCWvOkj!G9f8i=7ZC{KJ2Hh1K>kpvs z%9*&VpRySFu^+F^nt|P_e*(t_@}iCBou|0=cE$4!ztc=5mzl8~8 zALk@}Kk4Gz0un{7ySTsI9BkCTO)qK>&`O%9q+@!=haWej*CY^42|RmQa`cx@iHFN_zqGYAA*_LUm)AG@m)p`7JWuDqI7ftFMJ5iSzSlM>v`csg%o(oQBf5>tWWPV9`kJC470LO-ena z9#Hja)|2ul-pSJ6NSC8jc3w#<%BF(+~6R?{OG^J>F@F-9!#F3+0_lVtM=oeU2WGFZmrjK}+n<8hRS+-6hm{ zq+$wf8Y>D7MJDw9hXEF;I-}&9qKhRVe9Ms0IAjaw_sFKZ56YbusOj*+L?d2P<%r_m z#niRKOq^~ro|fc21#yZ@%sFUFiO)Jgd`mdGw%dS5RCfu72N>~j`i0G2Gw|C@CrBEg zBCgwdm4X^aBovZl>4FZGZA`3EW{&2n) z8ZM-8_kmqxZ=ZVcr?9LjTTv5nVnsTv24|7H0@3VL_Rq84J)8Mk}UHHb5J=Azi zQ!(dXI6nBRk0q;P@RP|$dOh+WJ%72A!^ic&amnfYUVE_OrZ}ACvre&Tk^|~&enLy6 zyPcK7i^mRn&xH-$owl`G00-}9^B0VxgZU{^Ph>bRb&-kxmPowEE*p8ni0y2vYlA!5 zn)p%F1Rk9rQ~0;2;#9MZETwud{wHC-IgV&&kibA4SsoGvnrr&=>Ut0P_l<@4J-v;E z&Wq{$*Nxmb_#6(b(j$i{@$}%wLCC3c;N-r2S&~MQak{hfQ9T>}kk~{jk`H0@sC?)& zYdW6ZVu90ABtGD=C6WhZAZhvDq6NL9FzT>^qIU;VW;pS|jbE9PJcL=dHu1^leeu~I z3)ab7Ep9w*O#^>jqP2NP`9M+!JW_AZ>y}4wnRj>oC)X$EQXh%;6z-f6m_=UevdWe$ zwE$TLvcb1%@o2d@w^#c`)lC)jGY2zq4H15z%`_qzZMIX}#5DIF55?*MpAJaE!CHrS{G$xc+J`ludnI zR90i{Klr#7Kgv(>W}ggEN$QQyZaOC#ejJ9al`qBgO|vNTW?QM{pI`E~5sT$= ziJPU^`~1v)A*$#gS637wk0z$wI-WO$(IdSvYf~}b9{&<|x9f}_a{t4!d6#)o(NYL- z><{y#xtxVf3p7054cC?%^Yye{w0UQtXo5G$b40$hdngwChi>8=i4ESEogg-9KLr<3 z#^t`{Bn%6o{Q=!EzV@6vXq5rZ`FU0BccqWqdjA4cifbi}uuS-8e_8&aIsv?F`h&;z z-5BF5<1sZxczx+=YE*dv8zq*~u8@6@v^JXpJ}!g-NR=Za)-uo#3E%6&LR>nP{!5jaLI8ZwiQP*C7lw9O;I zWXEK1$o&CdrW&H~&zp44{2()n0tk#R;6pc670N#}@ey~RyZ|-4zCq%J{<%#pR-xRI zJccVxpUHE6zJUl5ozIoCH;Hp*dPVl{R)AZ@A6^n>PwKv%4{#I#T~0kuxf50 zjP%=u-V3YACd`&T!Ur)<;!4%exhEVuu5H{ah z3hO_0!S}ac)A3p@`P?a)P9Y5?v`#FbbK7bJtIwCkSA`jH<-L>}{Q44xc95R?J^SOA ze-UDrfT1`@HU@2y;~@Bqy|^s!mwedSr8M7QHaEKHLwR%$QSk4Nym<^iF+WVrqq9Iy z;yKw_zrbfgAYEM-N8oo>__#a?vgVzK4Uck#@O$UP_NS!}FG>g5e~F~~ISMVUUqG2& 
zs&lgQSmF88{^;Oa1y8^40MGsFsCO|?vW~gv<1Vp&q)fBhj4QCnPLFT)+Kz8@6Ul5_ z6jTjX7h;=-Q7`L!vD~XageJsNyO>@)gfm&Y<{fIRSjummw4|L_SMccbRGi+F4izUR zQSUp~A+F;D5(EVm$^&3i#V{d0>m1zg6eG9!9zx4huY=C%@px0Z2M4)_;-{iI2pH)O z32Ro6g!PiRxO4e?whx+|vE$;ob{t$5k9V7a)?Q1dw`qo$sv)O4G9PT2+7(LneH2_~ zETZc*=HTpNf_lr&!M!Cj=#p_9r0ZS*v;K#nZL$vb9G@(9OqIix#Hm=BGK4owiUq%j zI&iwD;AeXahI=-C6b2e4ib7(JIHZ3o9mOJp?*~b&=qu^Bb zRLFUB9hRP#x^G`Pus!U9a--2aWRsgrE&mO~1{MnQCGPjBzn7t4RU(Jaj^}r;d^qX2 z2CwXX9hOhrC-}ZFmrbhH;-jhC(8XFV7WR%|eOn(;TX%&FI*j5mXD8zT=T*4bX$AUu zy(2v#jXfp0@yI_GY|P`@%WTB?*5#QI~Dp;)pq%NOC7N z?3noyF3Ki=_L2>xdD|3Tt!)`Mb>`r7izUMdS8R z!ZQ;}FTdm*@?Rf#b+|={mCn3@N|F4kWiJ@#yK<&hB(088Ht=qm>9KKV7 zw*+aR(O@|R`OKi-BMjJG*_y7{e1JFOC&H=L9>V*rrShN(X-BDV!@;R5Bqn)1PIO&` ze$A;Im0-hC*^TGWVO&-`7DsLKXZv1N@Zz00MlJIYH$DC)dNeJ8!4jwI$)znUyFL*^ z3U^9At9X$cm6cb0gi0Vj^u>Fcx3^y(nd}zmr|>TKcfgm+RVE;Qbd( zUM=M)ebz_PruGtXgxngejX8?lE=dlmSxb5M-jRsE^zH1Xo`s}^vq-<^Lzw*TIDBpE zN>6qaa_n~LcGLO|BJ2(LQNcW{Ez3c(ysl`ia8Y;~pv;4eyk&BC6>!|~z4$1|*1Kdw%%s|OJu@EMG z9?3h`1PeVg?5Nq!fg&e8fJd64TOFpwk;KRIv5s-5&dK<#CB8cGsHa_I{x67pjEOn+v$_TrK`zf2ya$ zeoDWi$p%{M#5!+%9yo=-XKofb=J`SV%xrKQCeO#Rd}w>rTe^b>ICPi3Q0LJDYYkpY zo#zvz-Y1Cc*LIM%vKifoEENNk){yAd1#=`m9(G^BOwYM6^)a2tzQ(N-*A~AWhiaF0|DjFEM@awQ-ERU4f z-@4Plynl*JXU}(A?R&38&&1o55!6R!vZNbYUpp%FJHn#Tn>`TM+yt5d3b?8vU08q; z1F|w&;{N79$cD9|W{?_J49*izCCKRC<<~&IU+Kb|!4$mGiz0fDlbA9&cN+9^KfA@2Ghj}rn_YwizF!BQy;{#xxM zj?L}Q_NNrEe9BN<3Ih0lyb7ze_lpC}%Ak8|GRy0vu|v}Zx;^hKTt9S*c8v|AjdMrB z;U3*^9*iM-gRXYhruXA2K3_fEC(g#A{0m?9+yuM*mHCNAH#F{e2fw~tCrwyNb9>+X z|Gl=0j~)g^Z3Mgaf%tWK9KCBT5Q>|1x#z=q(z|0fw1269RS`w>;B#kMeS0rCUbca! 
z{ZiSa$PW$v+Sa6fzf849KhU&5s6Me}74=!v3)bvgfQ1@4!UpAA^w+PL931Rvls(dvF66ZRMij=PFpb_Cr@ zPnxtmmpZPNl7sGMTz6!ka8TlkuKFR+N{L-*@x8O49H+tB<{L2jo;q#XQ^)%+=flT} zG`^uaU8pbUONU$I#k|fEXGt(Y*FZzuu)mY|clSlxbqNF5uW%AwJ-3LQg420b;dW3> zn!wvmgrb4^YhnIIZDIH)2fSYKfnM$#kI#A>gvuH2;4?3R?!_C^*QphBc!dUz9^DVC z!Vgf^f`} z{}U9}Bx6fZrD#3B6Fr!GntNSHg3c#C^TdfJxZZz*B()sDEhZX#r&0@*)LpTmN`=$) zBW=nxD@Cy_j#^*crD^ka;eZJeZ~y*Va$jCU3ab^w^Im=VcApbCS{6+dH3<$Yeu`)M0+qY&P@W3)Mvf*hpS-+-%)Sa@*73nByWa>taG%hMa>tJ3omR z&Hsr$lm8RfZG6fd6Qg;=i~e>Y^&f=l%tDFRu3A%CQ;u0t-D+CkIvo2gc>=q25f;Zb z@SDBgK$a_``F-teV=i82y&g$8<9-S!78cR`1E)|mFNHoAxk7{EMb5jnnnym-sHuH) zk<$8I6?$BDV9#5d;MKKPAln)!MkS1-AQyG)d*~eg=h|5~+jTh3c)A6=-!6k{kIsA|{k*gbB&T<5mk8(8{&5t>M_$b&A;e*xv)HqgY z3)?hw<29e$dFGcn@Sz*i89GW6XL<6SGGk~^Val$4MtOG9&ShT)-MzP;MoC%lf;bgS zf98VlG?>B;$V)r2W*AWT$TsD~Q~WL056^Xb24@Bj=k5^7_L5Vk=Y3Bc;k+BxCoZOw z(r(iGp)&rwd<5TYS%IFTHi}6b6>&#z8~XY9vBV^4mS*&$661dbcy$RQ*OpA~ZtjZa zKMgp^VKR)lKZ3Vrg}?@jLEIPfh21~);+V0+dBmg%c>a4VmT%C5+_41^KCe~C&ELXz zr_BPZ<|CIJM#y!Sq)03)S$VO)E%<(1jEk;Y%ga5)Ax=B=sX^>J6kcDEEi6PP83b| zMdQ3RU+JF28nHZb9NfA}dtA4p;Jrp)c%lA=bc^Ou+d&OZ_*7yu?%yZt1=jVj^j0G+ zXqTfIl3OP0$Qzm$;7lJrPm%JsQcm7qOWMg53v~l+VaoT%aO6-=)@pLWw-WEkXMZdw zwoAU^v#nfuDHz|IBycUhMfnhEZ#v+tc(d8jI`F;~JnbR*OZ^d)6-VPv?L)Tf0_Di# zuqplCMf7^kQ`sFCU3xWcqxf*g6;3?-tGeogC5!(RkiWMgOq??r(kq8!qV!I%U1o_R zbCh_sVm_T-oX1~ORP2nOF6NCDwK!1hfz+Ui`*rnEZTkzHc3=hXF{u?3UKp~9#GD=x zP{n_5cw(-%9?TrsCZ?E2isy_ngdcC7WAV*y*m+bNw#=)Ls!(C{d)XrV)z^()x+}r$ zmx?&7&tp&@e3K5B`tsxCPOPGNj7>ErvWk)}t_v18&NK%nJ4AB#pJ(`jMYb5m-oof_ znJ}+nCJpT#CCF;DXn3&_h&GontVEOht_tVGt~=;}|94QY-bi*6_kdA?FKzd`42nTN z#8sJ5FycuP==o)0@s2QXIem~8pK1X8`jNO`fsd7R%(DJ`=5x*gWdQ*W)HG0J^)jO41>lEoiHr(DTNJLBYxfC zh#?EFQnQbpY=rkSzO-Qu)o-~D+r4*^jn5tO{oM;!;2fH=dIDa_dna5^d@H6+7%E

=WuLe9@KPd6GJ{!P^T7ch#6}NYPGg_;C&68Yv=?rgfvu*SJ%#4)mg%al5yEg7`zEAN;NTE?d}pgz*6$#k)A=PVc)bLMZNGv8M@zf+ zCW+movlpx_&%@3tCbJWclCL!zgTCa@iA6bdVPQ739aY0)Kf?LqK6~yjv5=N^FM|*E z8IW$M$-6$e@*VF2q36≪&xS=qN_?*Qqg;%-=enExC_$^Syc@3$`^C;yH75ysq|8 zR%`Z45E>?2oqcTVPg+HV&;^4>&FwCPk~m*3-NB_ThRM zCf!-Tqcnpgl|sDy0jl(ihg?@(xG=Fo*ab^$;8Oq%U92d0&C{gc3UOlKTt%w;+y+5! z&qK!C@to^Yf_IbE=)%_ZV5ZUz?N7@f)%%j*qR@|H#{}D|SlFPq@^knmWhR66-a~so zK^&(fX%z-76jua?;r+U9SXdu}cKcSqf0D~3ee5`)yL1ox92N!^Bd*e!p=tEdGy!&~ zrqQQ61E96&tN5>^QGBrA7`dK3N%wXb@b#Og={-J$2XQlCS3=`O<3$I#ndf_i_`> zwa2r^s*y7JKu^q3w}iW)vv_qzJmuzomN?r2E;^{p-=q$zmRw)H@pv*-_n40>mV^pb z!w<6k=L5od*IIG#QAK_-LsTbFI5Bzf^4h_^-LZl9(pj2`(vH#$a%?m zoBvE)*+muh8F$Z ziFe~@IK=r|2wFA6AYNiNUuv#KUoxlfDcx~Jhs3e$XN2#j^@jToz2V=&TR2L!A8xk1 z%u`O@Mf<<4;PP9A?{|`?b!%hraNcy>5u}b2BAa1gS!DHJ9YZYJG!B)!{Nq1c)%o?O zShnfCk38EBqDP;-Flu=fD1Y35mnS;&)2@{?xwFJ|cwvRkO{qjz|MuuDzK_|#AB$Sq++aI59PUBR;Tm{vv((crxynmh=du65 z<2WcNU-Nk2;5{6ymGH4PGc2c+6|X}71GCvSy6 z@}+`tyrb>W9(gdoTRNNdDVOz_@>o#vxCWDtz2_cY3U z5DyH_qOQxwKzKn{-e=vJ`>HG0eRDnslZ{g7XpIjV4=uoS!@+!dK%H$vNCS?%&>qJuq`gI3`55vZqxS{wVd30?+zW;WY<5JSy@csrPvGPAb1^)`8sBJ2-V+ z7+K7*rG?+4=wf~h+jlddmNm=xgQ>QZfAOGUf6KwV_!idfRl z#2;~5JZFeG&$wK~TTT#mx{`*y?wrJiao$W6jcS93!`ttVF+Sc~(I#KFa*W^Cpuy|OOqyVSz38Ho;?ye97mxp9d&Lp#5Y++|*63=H3v-Jr0ZPf||Brv)VdR?X`>4;`UR~ zz$P2V1tGjZeJ!XBHs{|Ft66Ps5R5;1feK4(AZn8ztEcs)lXHS`hNdD*#vHnrP(;%U zyOVH!6^}ACC$-nD^sus)$LmeQjJ@5(`sXWPn(QQ+83lv&D9O`MGEGQ`(`L_{1@yA5 zH|`7**rn4r^qY|(tTsD`yx^MTD9i@E!5_sjr$52Jx!=jF^B!UHOmiG|xsV@r4xk%b zF2NT+Gn^znYjS&4;;xGWuvgV38nr^|1ok)qorikh+DIen;C#LE@5_i7hzAwYT=A|J0A6^hQ7X`a9v5j=;Rp~;I|0- zXFU{ez5Gk}<9B0E)x+Z2krhzA_A~!oagB0^eiAGW^%Ea@sqx@B=3nbKI8;qKSJxR4&>}JuEda{lOz&3>ZGdamV{IaJ;-*NNJlPc8Fhvp3xKO{MQm|gVaFSZfwJsH|ue$ zJffD1fq3%wkLrMJC&d!`A$0xOFOJYnp--C94boPLzYf_AahCgdz_6*TA2Us?@GOD8 zU-E>pd%N(){psMX{E{#yiqDNqBlj2IpyW#o@3E3&KJxd%H(%_LwjMz2uEyS7~5VS5Rvu@^iA!Y4M@Y-3-T37PPo1g7XWC=Nkvo_AcF>XcD`vr;q4qe2x$v&aJzIHUi=Bd!FtuA!i-&9d}j;ah&;e+sgmC9 zA=-XkoljcXaf@0%&im|1nOXg0&n_+G_{ML-xJgoH`}`?b^ 
z@W6##cu9&9r)r16h%J%Awml(O{_rKME8h~+@=V}@mmLm0p=7rqK}3^xG34~68kLOp z)4$JZoH1-YrU&ikjcsFakp3cEKTsc?wnqpJ0SEcwLlg3sKZ3m?k}*!YJ)K|t8uqVG zmG;yL;8Pnze3156`tLrh8yck-)bf1h*oBI6_O zd)2k(_mmg>aq&SuR-Og1>Z35zc@%c(xQH)mDrr-O3+^m$K$SN!l*cq0me&?|IGFBYx#rbbOfX zf249p#zHYsLkW*m>hiH=sx>ByB_H|W{d{ar56=8YSf}z7hm9&?t?iliw z(h!HqUCTxF1V2jukOsLWeQ|n-Dx2P^pl7w#HD_(>uz286Y@Ij|>oQkz#X2=%u)iAx z%!=kr@5{K+epF3X8q)0rHq?;rjfUOwF?r;6@!}eF{HLfO`sz*Masnvp7YnJ+Gi`I4 zBf%!xj&-dH#L|FQaC*u=xNu6E4bQp4Q|ZQ>KH!z0a{at;Wz!B}-6;(Ye)fwBmJPuF z?Mp8o9Sd9L--a=sg`#5Eh|?R#_vNFr)2Y@{MO^#6m6nGshQ9L(MA35w3DuF*=Y=NT zyjf2n8trt#^FL8`(uZD$rpu1ZRbzeozI;=d<#ZFLm`UaRs~V;`{B@6K5rFTrW^ zV<}H?8V1cY z7q&d@AkQv>INEbIE7gtwJ&$|h&1=tv8F!87Veu7FzDoq`Z_7cWt(mauLp-cVG(d0n za(+I@9*Pc(#52xG)L@hY3$lv%zZ3t&n8ztxXbYh z{OFF*tJy~!Tyg{x_Dc6HP01~m8HU^KUGU%yKRD=Y1rtW*!OYeOdZagw?86G+g{La3 z+p1#;+)j?Qzis|jFQJ}etI4r{fw;P(6v8CW$}F7|vgw-+QJ45F zoVff4{V3>ZH?{c#+bBiBm*_OOqCE*p|9dF0a1>#+pE{pjB=Nwl6j*aeJf;tI=k`_i z=z#N1I=#)1cNp3HkGse2Q_s<-uDfwde=n?zkUVYfiCFz9nB=-$p`<}Ksay6O@sr*z z%y;=}Q!!vI`#655Qz3Hr)j*z8j@62{*I$OFA*zDrnQ}-i)PN@wY}o&2G!IfO!RmE! 
zDBbn3lj2%7A9{+OHJ$^_swzk+kr%@j#?kljdYB$F0RwC|LEZduFyxE@Uw)fNnc+F` z_>VfRUay5e8@tnjghgO8C4^mbJw&hX8?i3qEw1*Ep?trE{Pp7}@z;#r{PCqW-})XX zv|PAN8o~9v9Vd#VtDnP|o3rCR~+_vlHL4e z%kYrL4SW=kLdqxClATYXY_9)U?t89Y^!xJ^?Dn=`r>Pa#@%J3=X}TthO%D>P{H-ZH z?H}BIVFG81J?YlK8LTzAPO#mohZ(lUILEyfeqGrK#@m*n#?~)5d1t!t@^~1oRPyD) z5(i}G^<{i5*bZ&LiYF{^2F17Ug_)((c-rp`xaYMGy194bl{3eHisBrpZ@-PU)g+*` z={frE$9`@;DZLXcG{OPt%EI#N5p?@on(d~kLumfdCLDTbBS5#+xMOo4yPNd1doZsMZdGXE zjnAvVWKxSzl~9f8OSAZjehNgYJi*Gjvv_;m1W1}HZ`XI(JB+isi>HUF+Tppy#KWIc zVq%UkqCt(HoIL@0OZL%}o#W8#rAQ}V{(p|wEib`>%Y%8@ht76+8q;yck=}6F^(Hrrl4gvq{cBoHrgQMl z2H8t53$9DqhlL->YPKq2_9e4wpRgTxP~iw#ulj;d)7pje#v0&vW)b*h8nU9-Xr54a z5r2O;!>&`#aa-&tO36CRVKFV(XYDe~C~6ljCR`O9i)V1{>0iRC<3Tvc#u;_2voUAe zNDjEYlygT{im?NG^Mj{dF)=s>llx@S|FRDM9-2~d)kXT=wHoVH)Ufl7EJ5*mmUPqo zg{MEKaog?amC4iiQF8{Z$3Qj4F$4<{jaO}`l@KNf8y#g%QVw5s} zNGKpn$&u+{GMXPmT>!nJ-tfRk;sUs&!Vaq^6gIgpU;b!>)8eK5)+iQYXVlSu#TB%B zRxm3^En)o)Bgi3IMauIF)L89;7t`a>hW(xw?soD$?C^m*yXwKs3sp4;P!xeI&3)b{^q!?giiI;jt@dvYGW zZ8#!~(S0X`L>#7^G)3O2-68wn87jQI^Fw^uH-P&czDz^TNw>D{$4JG!frigZmtFd+ zSu=3nCfGAZ;=Sh#hDfglvKpkq${2(F0ymL*s3OZBY#^U?e$?jFmHpLBdE-4x4o_c8 zgSP@Eo$N%8efywZb{&1Vk}re?w^83@7h2VvKuwdA=|r_IZ%_VBO@0T^qs$I9Z|vs7 z8m%;8%{5lfnJJjIj~0@%-_y$_{!p@ZD)sJp5Av7C(t>;k-g0j!ELM?vg$cu1YyEw` z|8AEsLEDoTT04s7MLsyRrGO$2_s1t=oLD?&3a*dkxFK*l#yRWr*Dt{k)xM2i!Xr2s z>o4&KT4<@VGdzvyL`_d;Q)!R$P&#c4Tr|>xuI@`^6J6EVLc_2o8{}(FPN?F0W98sZ zLz?i?JBzW`XneQ4vDz-&L>wpAAJW2{1wHLST)b=p&sjGLhs-}rpPHtL`MD<4wiP8*ul^QRFpzhCUxWXcV z%I+CZVTcG-V;}IUi-RcO&~b5li1ZBey#gjt%8>P4frlO6f%oTIKo_MsID5qiJQ5$v zea()N-@&fjrlX8)NjlU|e>}tvsHB0fUxH;)DE++u3MM?W~=zW2UFk?#{zuXMGGS7H<l=?9@5{zHa+l8m(yU7dPa8Q zR{_l3Q!K_m-$Vv>d#UW_EwQ<@7-qi?L;c2S7W;Qj6ag7U072ZkTI$nGafA#mNWK(D>{K@l5&!iP83keqJ-D zgW-8B8&rliBiq?AV>h24St~4enudFCJc70%cD$#D74*1ML=mcssBGvByl}=xoR=jN z7B*Q@@INV^dDxok*DFZxiQZtPeFKMiEA#nX{p_%(2McP_{FMD3+-BC3`W!F2h(7W- z;*~ihxupZIE`*iSJ@8D7oKVp(8WRg_@p7UrR_zPL7DYord*c=8EbU;XCXBGL*!O^Y zb{>LrhedLfnap{(n1gZsRKZ?AvDgHe(nP%(wmg1fSXr4CMd 
zreJ5ZXCdhS_$g};B52*tSo(9y9VXwtiRMS|z}595aG_xV&hh&QI|9byiy584rQbb1 zcHXdN+?GK#cWhd@<@^mW@YSKY{r0lLn|$$8@@}v$>42uq>$z)I9vXRk7JaRba?8}t zXqv2SH}lq39P_-xX0(*2TvE{urGm1UowSDIm&w2wRJdkSPwts*21&z?(SM$)aQ|cg zcUD^lyTWJH+*^1Gdu{aMhl~5#l_aG@No*jikI*CkybZi{&Ueu;Gn4I7pW`;=543Rh zdeE^*q53upn+TJh?0c>n9&A>nA5&ELZq`rHsX_~uHAmA0v&9ouXQ`DwodJm@nK zCwh;8d0$skU0@{Gu5g5epDU>J`eB&S@E3N-U+2d$Ysj$arEE_3-|);q1mg*{u-5Jz zEj3ZZS2u?7r|iC{KFox(?_IW0%ZTNW#4ujJK$}(-n$}dW`pTuf;`mZkk+4BYyE;SF zl8e{vV)5_{n8C-Oqj)s99LuNYy632~?@{W%v6twyM2@TH9>v`E(L9l)J?KwM_B3iB zzeTQaaqk&8sb2^^E1y-*|Eoqm9sOx}wi4Pao@dnwyRjhIcXaw$UgyZN#Y`-h|9QZKr(^c4gxO^`l^jgWKr3|JKx z(vNH7!TI+unw!-i_`Kdn$36T+{R!JZuVJ>VN2w9|UYBOYsx^G%rV(eq+e$wBK8WS7 zpTPNPhe1twFl-VA@QLlYV#DrktWhFm&=QtGa{VOie%F+nDy8pQ#yafd{uF#J|AZ-- zN<4Jy3VK$#O|ZZ9#V`(8L5x12M>o9a12`lZ?2nAbIvj}ewYPkB_`~3={-8r$rESJxd4|ADv_d;FuElYu$e|h2hRdMjjDn_(gc~>0VGM*E@ zB*3^QA0fJ9J;h4J|DpBfxT3t6Ymx}QJ_e{C^99-(f@Swbf%ivOvr*649KJUW9%x&m zUi>>YQ{BkJ|IVewhdy)J7Yp>5mJBWr)i5;)g%4lP!o!&#;l`*(^p6_oj!;MjVY9Hj z+hCG+?(Eqb7v84HVwljn{Mp4`6z!pt;0W0 z*mKVz?cgK%e6D5X;$#hdoT=Rtm);+NA>XG7y(M1uGJi*Y@mS*3UX$a1Lx*5zaDTqK z%}Utjk_XG1+)3GgIj)@iN$h%U9C)oP#oseF@c7YR;APWHe!CsZ*l}ZR6nw^TR)x+_A?L78Lmj17|?Z#gBh-MDm4k5w2!2<_E(-%3;_~>T4Qi^?+ z*RK<98Cg)SgPAyH-*I>ryM?Dqxy;|QooRztBOP}bhVDPL*y717v9Uyt1I{~=v(H7S zy_=3%-Dil#?^dz?%|nozr;q)wwSfKSDYjEaEx?ZGL^QnAg)0tSBO})Ve0#S&zVTq& z$HVk_SdJq1U!VX(Us<4=qZU4VIRkzB#BtUYGs;qJrE3noXm-OXv9tC$8mN`Vn?I$B z1_vu?jq?t+8S;fzPE6oa7GrV2gC6wwz){o>SGL=1x(*kuORAQSZGnV;hw>?C| z7F=H5pV$0nCjD*27;)t_&NTf=S_Yb|cK$2+x_lA0>dis7FnP}DYKkXo8gRn-TQo`* z56Mx07Z$AN|F+A7zaP$v6(I|R8OLU_y0M_*^SG4ZYI_|esO?bP5 zU~a@gbPanBtv7G*nwvu;5#u79pMHoeT|1$6RWss4C1HJ{CT)-$;~O{q$BM(%@k_(X znpy7IIILN-CaY#VYj#ZKUP|oF;u=x|WZ@ z<)?kf$=L$pW^G1;@Av7HWh$Djzk_)nZ*pd;wCmW_)2^jq9`7hC#M^0kyz#*qSeWx( z7&)V--QguuaQgfp-q;v|aqU-eVZa!$n4Am8-9KZ%#14LHvzWsQ&)D7_wU+zlmVl+F zB{Yuc5W3H9LbrBJyH?2qvdO`OilZWB$3s*3er~X6@uY~$T5D)?j3VYd=CeSzB7*tdVRZ4jZF;sDyi6cGzVZ0 zxo|#HItshBoW&cu70^3TMZ8dG&$?-zSol!tFJ|P?j+23W^sQH 
zbuMe@W+A!R;QWZ~{QW;S(O~saYBjc?v#y4=En}~eYkLtmCf^knkMByeZab1*zd~Wh zj=w_0r{jYD_!H3VwO4fhnS(V82H}zhf6l!w?U+?&v-NQeFdp0tKU8D*$*kT~RPz#k zmF$8_+6afpf0&Gj%o1+xJ)(^GQ}?4kK*D*lek~-7dREv7v2q7iwf6Ig4VVN zP&4AAaBz`1Ia{k@(i$s?1H6SgUAqF``>)2}#H}cou$``qo5ID7BcRyR9CAb7fXr7kb%hjvFeZY*4VB9=H*d8V~Zer0dwcbuawc zsEiA~ABWpc56Cd$6D*P#f?-|`>{HuH{<)g?^YS@H5`fT&rb|O#U}^4;CfAWKGAqV2y$61)(^GhtoezQs&JHioWtpn+zHy> z?-7M3lt8A#I&#_9MNl2}0-HulbKlIXl=meE!J?k}6h45^?Y69W;3~B{ZQvti?;%?9 zAnlLs3^rwYv_7(q>LU#~%~x_dy*&k|+oG|d@D4qAB<1Kgv` z<-r9&VLBxgrSlicmgfrLsETcm>hbrqkQ3x9ziM9PtqwxSoC-Uc5L(KW#O5&g^b5wrh=8S9Su1TscA3 z>e2{m(1U8$q)~J83OI3P85}>|kMttC02``e#n){(uT`6KB>wd>(>oM5as$0{3FUv| z-DM`L=kk~afBM1V0e}m2E!(fpR z&#QZE5If9G2>our+vm3^#7~p5Z)@B6_t(OYm%PYwK@fg=u#bxk(`nnM0cc<6#J@Xl zA-B12gkO!Ds^bj1fwT2&Th0AlDWkX=gN@6jZssI>8F>{326nLRlri|}+#|75&nn2( zi^Arm6=ds`g~e+p!jJ7eab2<^oR88b<9U_h{jF)DgBVV!b0<;IoQ=Zz{1P$@ci^ED z`*ZuD49W=o0RcTLWX{L*@VQf&;MR2)9@zJuug*V3@eI)X%6E!=1Bq9Rd#`x z{ENb^ONur5i*iI&IW@lUVi`tj^u@t-_O@N^R`Q^SIc(*U0`iH@d@+1Kmrlr|mj|!H zCW*;aWnF|O_Jc^y=o_p&zE*IOb_%aUJ1F)^Fkh=F;XIXa_VO&H-f6>Shj>H>8?PSMD3W#pSUiGANJ;x}@Ey!c==yju>OYpabAGGB}}@WuTHw^Qyd z56l~$MA3cw@xa*5sAXj*7|BMl^Rs4QxWsOq5U$1?b%Pu0Q+VQLCvcQLhs?>Lb_XB* z5^lHXQ$N`xc>1lA-5=!;yZG8T?tCOscGDsZWv)Kp{4SH@dyj#(#!7mUVTnIKXhOy_ zM>rI-i1pw3(DhA9`2OoxS@ZsK{*-rF>P~)UXAA@EHkWj6{G~_W&JD5e;eAUb+|d-k zQunk_ox6t~^)(e`*GyP9qV@#nl;+N$`yln!P%GK zlGH#03R6*UpqyRMfmbkM=Va(5W%DwQ=5R%_D(%axgX{H)Xs)G;n}_t( z8zUv2;u?(X*v5yv3-S8J-T1fHTgd&HjCtewaSogyl?U2v^yW6nPEQc(&Md+?gB0!7 z+)v_;JNCj-%LbvR&Rq(eycgb2FTlA;%Q&%n2yE&(n9@F*@X_fDgo_gwVvBmVl)1>^ z_0p{+EnN}*t(?WxvCrvvlqq|-9hN%%oxw`l-&xFNS^lI5LG4dh=sny93X4I?8L8Fu zdMWTfg|+zRR|#%*yNgNdOlk`H9F!RG#`tXP3AF9~4$~(|xtJZ#;FHQ<8@B=tuFpg~ zKQ@Tv-O};eBX>T(a3QZVO=W}N8&D9Lfi`OQ_~xN!v|{~WHW)gD99&L9a_5nPR_}W9 zXbZ75QrN;Yp_H6l_h3!&2mIAj1{1o5lJ2Ssv}$=PgxpOOv&<$*ori0@ci%2fKeG$w zhsFqosRwDHot?Dv9nR}~YH)%|6I!Kc)!dh6DW&jP=&-jhw!Ybf%jOpGwYFJ!g3{UE zuuc5@=n7T5k^JJ{mtfooIcP3zuod!m2*SY0ATNlFZ 
zP3HLAzAuDH43XgUyTYMO?XXQfNXThu6duJ57Z$I30bOtcXKH4`OWR^Wz2*d6-YDgj z69+-g&IGo;c2_hwq>16GuGFW~4_N-~HkqFGlTu&D_=vR2N;Eu7N6)_`^&#Kroxd~h zsw$_q_7{cDrjxkfRVr!T{SBIZI&3?wun~5emy)TpBQ6Ux=f~re@z1te!K>1mhwdWa z-MSE?vm0h+2BTd6q4-L=S-yJz6r6N3;QZmMl=SZ+4J>>_2U~`m$?E<9ERXh;{mF5I zh6&Yh{Kiu-sW;}Au{}_IdIbHqo(NO4FVHzJLr^o?Lr;BQ3B^03=;XUY zwB~6NuDROq0++}IY?tQ(2mvmN$j?63KqlaJT zHzvBmC{Ht*`Fti*`tC=ic}3WA!d>7p>eJ=C{`{8rrf76XTBDzzg({~TQFqcMoS(du zpVcuF_zO{39=3`(9Fj1<+z6<-+s2d%C$Sj@vf^oX{;=M^iXgk8lqI-nOJB(66AsW8 zd@-Ndng^=V5%oen?R5nlYg~w}NA7S>4tcW;5v!qZcOh=@?PXJ*D3H6mEwheH>=R~lG=;((^S zb8Qw~8<$3&ciPBsGtchqi{>*duENg+zqupcA!PI=iuP3Gk-@{+=;hQOcZP2TkMUoa zVby+G7ZSvb*397QH5A}@WCeaPQzpf#ZE)oZ2&{%+m|Q!8ng;o)M+?A(ay z``1$J%1X|p>OM}qe~mXi6Gs*=&cR{FzO1M3d6r%n30K^TF~u^UdTvao=z<=0@ckgr zlFvXht^WK))A2C+ZvZ%G%cIh_XIT8HiqjZeLleF+t8fi!MmzG^~15l~72OpUjQi+>_kpf$Hd%z%YT)3PWZwUa;E!F&qz46RI-5am$R^d92 za`^6%IlcE0W;(KSggmjBd+4-;@B2QLb{QwLIfWm%1?R8e#s8ccejfox67tyAs4*;J z-)Yopw4|5fOfSa79B7Oa|g&o=%2*qiSaMf#hm08qYUuzbRCY5xk|4DX6exD&)B@nK5SNjz~G-B&a&Ot;L`CYC|-J&Uo|jC z)TU4iZDsDH(QYo5GaD%0A#Q@mkkizN`27U-(50`$kYVrH z?dCSL()$RN-a$B0#~S9i3NyGoed&bmG&H!OFCAW<2E8j7oNt{D2hN`3jtlIc%JOQu zvb!(zWMzU|`eQc!XB4a&Vh`q;wyY)J60V(|kE+Y;>G{$^oMFA4f%an@dTBBnUH+t>2e4j*)N99#n+gDFpJu_!v)r*+QGJ{%kXWYtT=VGp;$8KJ^byX!qg6(p+T>D z@%NaGG~RF#SuBm_HfSBi!;)I~b6JaqB`?Awp^mh(G>s2=yOFXV2f?}vW^_NklG=2n z%zMUgP}^C-uRm^$3PQf_#;rJJf6mx_Ax9xm?*wSrHsf!fX)ruO5zL0`fZSFK+WftWm41K0es3s*xx3CVC7&?b zt*Hwd&#gs4w*vX&ySwo3JUzV8zL+Ark$XGhlt?~+FmsUustg#4*9RVj@s|?#3x{26 zb~_EghQcV`q$Z0SJ7Nsi8MUA74POlp19xykF1VBVi^H@g>nqNEQx1D_6>x1!n#k>7 zE%zi_EN~8*&~5xR^!0v@K|i%9s>qr~D$GQ8zkirgqz!lWc(Qwe$5`luNBH7HIGgbJ z2mffoLVA596*ozJP_Mm$4WBTTDxO5c+Y>$%m#xBPJn!Lr*Nnm`rt{JKRHf*tT`JqL ztB{{9IAwxAJ!IhlTBI7X3R^p+%rLz_7iJ;sX4Iuj^MoTmP5ujiF(H!oabJ$;@dRaJ z_fpaP8#J>g6dp|d&A-$4fac`;oadE&9H%1#6fH1J5|Vg-&oH>%&9KZ}k6I3XkQi-n zrG-%mLe5+h$0$~cCVsa9ue&1j%1Wjv-_zXQ!lBF{IvusnM8eyKd=_7mC{cX5g4UZ& zr|#SLv1HU6$?@S!X@b2WvzasiY#wZa^%jq?K`nyWhW&@~X9_7uDUsz{OcAw>FMyaL 
zS@P3s#qxhn5R>A;^tONH1HyaQiR2=V?(W6uq4QyKLLLUMxQnv!d-$cY0(-!9i^NUc z2uq_6;ikmll;~pwKazGc^Sgqlc+4;w<$M6DZ9Pd>$kp}>j7PK&ho=cIXm+2sxNF%I z`V{g&GW_;!3IAE2FU?mJEs4BA$s-8|?eitqL&EcIUCMNA1&856d$i@F;2r(J9LqGJ zi(bMMwkp9LM-R~4$HBOdcagWciG~aHc?atkY>#9m3~J#;BklaDQD7(=&M=}D6>Uno z3^Z=k05V$K1U(IB$@6J2AGh!b7cp=sTVZq&otqWunn-G$xed56&P5N5Q+grX89TZlPOUD%x!p3IDsz@xqfy@NcEiRvzWg57aqE)^*OX z{nr63yS*NVxlWX1Ip1K5aT%v;nuDx740IJ#=(@#Sf!F;H(?Sere|R67p4tfK%m%}m zF{?>h62{NL=sM;fN$^`b`v!Afy z>pEDkP=?p%2zdv)To&`v8H(>&J@e*p+C|2~)-KjT>>Jwl_O{ zEvd%wTpF4fKar?a+rfETOO$v2jNkY(v|5~ztNN+O>0Tp_#nTx5XOYzs`i)`RZCzj;-mu-6S4~NbT!jpY%dDn4w znCO@_TlTYr{nQ>zXNP)$$HCFkPcIFnA&ab0jkh6%Dj8Dq$s>QE1La#CCS+h{Gk$sw zbe$0VV{a`mv2QvwPXmhktA){9Mzu$5Y>tfdZLA_J-hKzR<`z=(+w+vwn@DPN%C$!Nt@)D$9@hfbSW5~yxETL<$%;pL?}FYXN3hTCW>MV) zOSbagX)K4;{MiMWux_LU=YK2>xBu+mT!v)`PVH^f*e4!8{1g1mjd!4>crN%3KEa*0 zet~*=w#@OmIrvXYr<1RT!-$)sgqh(Fk$t@xTb(>w+$&v9%28pEW;#Inp=>(a)NTMP zHw8!rdo|+{C3`enq%Hldn*_BdBy{-71{!_Z9I7Whofz>|U=HrLhszgKp!;|q`01ww zo1fM}$TJ)2|5+1Kht!clS{6H!e}EtS!34_vyYa!ZNEW&AI5XN2z^0C$3a%-`;qxj} z^e6+W+NUHgTxlp>xHumAMcu$Fzx9~Ls*N=5*DW@s&V=$!R6#E63`7p?0M+QlP;S}` z)6_VceZ&hl{jnEV4r%0X90V!T&yoJgGBmh18I%iT>C%E;cKc2o^c@-FIOk#`zbZiZ zS>j;YyXYV;Sa+B6RT?IKXLW~jnLHO3N93`*!4jAz>^BAt_QZtxv$R&)ghodtc-2f` z3a^MD-HMt9fzpQ_6_i_Yg%zyZL#e%5(yJ;**c4wY*yoVP-hT2B z_K9UQc}g7f8&}QGbyX7!YbUl;em=g5m7)IAazqy|ZKR&01ZeuV2+FzlkXM3HoS*1^>k zcUfbHm?GERU~hxZFe9zG{MQ-2q9WtI^z?->nM9ppvwzRxAgw?Dd&YY{bLTxay>u73 z&7K1uisxaVW*ZKglES`oXVIoS5vBPzQM~>nuGZMjGO~-8U)P8J3H+GhE$gxQ+EExa@GKiY!+UZ6P?9*W~p+1@K3g&!*Q3f&)8vG)lfi zgLnHm>ZeeT=3n$U**_y;Yt2Zs-7=iA2N=ra)!Qilg05(++8#8kt7Aq& zH}q-OcXp(^RB~HZkye>YQRXE`Iu(`SZOA-a*YZGM)l|@ih0#za>qkj7J7C{kMR>JE zm9iGeO5MtVd3Rr>bx$?KwmN2*)%yTnZ8|IJxtWHKp9f&v6XK!_jp#?42i!<~!jE&F z4*GY-Gm{=0*6)%HP7buh4c8kb|J!g$Wi|_uhl#w_Rgp1AWm1S0i2@T*m&ha{{A< z*|@j4naei5i|cF#Q&ZnfWS=;OUREEbc@fK@;6^#xgbsi$0xNgV9ZTW{3cgJt&-YE> z#&v!0tLYH>Y&`;};YofU3No3?&wTleGjB2gm%{OEu9YsF8f_qOn%9HlB|XviK!E|< zwwFyB^PVgCaiS(dM+N4lPiCtVUGc|%Az*rZ04$F-fy1>Wf`dr#CtP}mMs=^*;`QZR 
z$=}K9BMSN8j@}Akl+P{fLL^W!=oE-(1-LaXPMa(Syh><1zM;;F?^uirYU< zn-<&m@LOH)L#>c08?t{oec2)STs&1t{f{HNA2SjBGJRR~k6iAW!)mU~(heG%hdWNW zRlzB4EhLH3JNCS~9*Q>^h!`a;^xw?hHvaa)3eF;GStU8^)U`T-mPPG1L1ixjT z0$eCkBEJO(@z6L~W?j^u>IWsU`QBSt@7@n=^wIUCXJtjQovNaI*K1_u{TA*{nZj-X z(Ys_t=_cKW+-J{FA+x1T{{7@IQJ9M*jg}=&UV}>)=9xZYvth4U8Qd~zXOkA>;qXU} zY?{!iPs~0diB*rM(pBd$=u{?%?)%ZORu9%=AMN;H#5Ydf;sDIMT?t3UFLBeFeH6rR zWBs(%q`}@Xf@8x6IKg+S7t~)oSnC*lZn)16w4Eq^Q~e9pn#Ry0Uw63rGM}~GFow?C z$zW^hjgx;Hv!&RV6mOW*6NP8YHN6T){`f>Eb|77C9Ye0gXK3jMU2yf!fVtme_+I^T z=yaL^GoHDDoq+U_C`{nTg*k91?qz^q%@ww^uOeLccBJ&Dv&5axylCL%Hul9+hxBKL zKy=km_+_*jBbPAxZ6rh0$BuJuqq^Akz4G90{{q@Bu3!mi7b#UUlP$e*5`Hbor^`(h zu*J%lzFsz>)!luh%d#Isz&|Tgeffc^9&Nxic0*}frY_{yo05X!I;zsY2u<%zfJr zUgroLwwMIgPhmPf>|;dnyGwYFPio|GxtDiO;qi;VnBDy_la2cC0OpTK#O+s0>5-f} zR?lr09Y5&>$(mPK(ZUScHZC3VUM162w5})vbv4f8@rlahHqRP@&P32D+hF=WcM=x(z2Y3UFW@`H3INf%U~R#}j_J?Q zPh$-Kd$S>$Hc4or)_t~USRg4Y?`64*YuMp{cX%;ea6VaF$DCPCaL7cFvtUis<7f{N zH|^NwMJ`Nz-)I)2oX?E2Vwt?pMfS_M2s-t1XsjK_*0}n?`^Wd;{x!pD6RU|RQ=iN( z=ch0i8zVOO$y27dyLnU(?EF8l zcTG=Nv4bI*r=7%4TCwOq@-Kh%TM@Q)cyJ#LbKs%PSZH^T^Ke5-gh4B4|H=c_CDuM z2ux9Dfr)$E@g)`~ZU*mKmKw5RJ8n%gLgeFD9j_Aoj7~||4MeT z^>1BSaM)rxP_&Hf@0}n&_iKEqOgmfBp-OSd&62CCbD(fH(8Y0L&c5Ik^PAnvl{IGI z!tr^0T(JhLc8X&oRR>F)OJ~!cyiGp#|seo_GG9Z7F8+N6RLIcAM7_xmi`xWSp_mlpxU$m0$3p;*K zqrFTeYbmRIm(C_V=)m|r0Ti8+N>~0FGuC{aS1p}`wx86cQ(YX$VCFZb93IE&vxeg@ z({?6%@Hx}alw~*V?LgllNAf1vlbxzpVqa%V*yVN3l^4tVk ztroGLgNLBIof3Nvlc+Ex1qXY1a=&!?LxSNtY#XXiYCWm!eOo8$Cm-g&1&_qv6K?Q2 zebdQ%q=Z6J?U>F)FPb|mlr89=!P3GR`E`t9=C2QNlP1hT7=N5HBHn?+RVQ9+eH{Bd zMTa%N?n|HgJ*J?OYlL^SB3LOZaU(-6vHszwFeOI|%1b4zVs|;!Es5i@ce_B=kO5K; zvr2(cf-pZJo1TVV!0p42u_gaT@Bn78T>{Qek=>c5lu^>PEb)Cn4J|ByU-R$WC(?BZX)(-CH`i|L%_ z8Gg+Aa*%nI%LXhuLYnPMnMP>~?&>v!Pf5RU$aoHak>Ql8W*zU4qdvnio?N;XPGWc#dq+RO5Z2J zqyx^>`+O24J@tal*0XpfWF(F9HNaXqYjQhO#dhs`NpreZpxfnf(9$oD6viH9218pM zuUB?3XZPFW{B;YQTy~fG8cWI9YdgLjGm(PJITm&`g_=$NvZKZ4dF{eDzO%wi{FP;c zti1*${BWV+imuFEM-B(y1xSDC2~mo3sM40P%BKOWctH`nxN{*jjXcEcHw=g6QN!?L 
z)jPh`{S-Y{dq|S?xh#t+={y~-U3KZ2CO>6ffz#n)11Vj%cU7HhDrTMVdVst;7<7H}!Ff{)U9 z!gnbFXRdCFI_XEy{Nw?C^#L39w^s_kcVEHS>Uj8;lucW8!??*qjBu>Et$18jCw)2> z1sRv-!w4%A8e5VHJ@t3!;j@S2|8yg5><~D2{tgU)~Yj|I2R`6KRz2N{#?}Om>=@#66uYnaWGh?^+ErQpAcaDh< z!0({(#uyeed#&SPQCZ5XJP#|%nx*kwIU z82>7fYE-Rhol_s^C-Ve?doJR+p#y1_b0R%f&*XZ;7ciT6J4zL}jUzq@x#~6Q_}}Cv z{50?zwtGHfp|9t`=(oDy+$(e~V&}tx9nWxl_$~6Cr$O6QGW2BP5Yn5Y1|KF| zVBeQN!?0&};N9q0RH=5L7t>VeU+E1vAEPh+{Wb}d()v-Uf*%Xtd7Bk{2TGJ50J}E( z!$pG${G?a^L|ea4l^&B-rm@$wX~B${sQ)-v;DA=tz+HQMb4yAOAAM$(ezV2NfzHJ1 zp2o?iyrFj5X8NfkWa8>|!7)pp(@ndMQ(mjm)8Uix*5hq#*RR)(gKwyS$-_HLu6s9l ziMrTB!^v!rs~oKORxR0}AIa>do#kemD&U4rM(!Qy)X#1R*$X?tY@zDZZ~9$k@;aOS zXFCgz@0>{m{b#}H%XS!aPv}_*gI-I4$>Z}Sk_TUT5<6dIrf2?&oGylgb6_A;yY2k zt!MxZ@o=M@=iA|`>24O1w~iKn=mRcIzF<5vp04`#hiPXM+4gn2*!1qTob{f;Y(rTh zb69eOn=*F?U+XX%i_+D=-#e6Ss>0x=_5rqXY%LpQlt{N%x4`;`byR-WgRGiVp;Ebm zqV1ZXd+T$E)My4y(HgT~RWUh%3-)d3TAb0NK*L^HlYY=vvPrlOCEx$D45McLRlsD{ z_itY|aegH{Sf)bkn1s3~hEe+`Lo__bB*%ZpHbweZw2Pg)FbzhP_oF8k-Tb;{Io^FQPh-1du-N=PpYwMu<@g6c^vTVv*d>78 zeH=ihTSwrV(H2zJ+6aNNH7Ef2u&1qr&(s>jCoGvp%7bc|ep)&@?YYZ6^0(oo)hXz+ zPn9BC^qA(FIQmg83l6g%!!Wxxw%|Yw{#A2hpX!AiTtx~QW|)&&Ybx6>I8CnBJK{#U z6qd7NHWy{{hK%CBQ(o=~dK8xquAB9V)F;u*u6T@Ht&Vc{WVun>#_(6})?&si9mxA8 z2Pfj3am0odX#BW~Gk;%?v2to~PPUKureZS94{xWja}L6)?~(k)h4<0eKNO;jDw&5) z8H`vPfPD>uaIMl~$ddc#=yT$=u(#Qd3uUvQqUR8$MVw%Jmk))Vv!-$VRoAf{(W|i^ zTMBcM84Qyxk21qrLf!ek(KKjK$ew(uo6+0b)UV95VXgHaiMh=5xbp_1LVFrEZe2Q~+ z4hHROvG~K+oQsK-VSP$c`DqQ`C7Pik_9SQ=PJ3>Tky@FY?Q1!H>Z1zOzIBGY?@z^= z=>@11-oUO2pXT$R#ppEu3RQ{kvr?y1?9`s2XnoX}UwrZrn|f!zDB5lsGxHuuYo@1= zvY{uZyz3!1;QV3S<-vn&jL`i}9zuE90o<_Pr8M{K0xW%Ck5iTyLDZ@ac2RT|<64B@ zbJaCbyT^FQapd{w_s8*v0#8t*#ST92qc&Z>yb~2n-Jo^Xa_)owRUD@Bm~$A>fQ_ay z(3oHkvUgUacHV7_IDeIs^dvLOk3^=$#;_}=pV;Q1iiGp4hod1&Xo7qp^TIvs>Prn$ z*Ri4AKG$&S#d_wN{2Y^vg?re9LeAu6I!kyS#WKd4FvXGTcgX%f}H-sKn~bP&h(>}2z`*TUowfHkxGu^HQq zsYT`yCu;Vfwi!oR?!Y=O>5C`0$ll|7JUP(FR9LE?K7P8ff!z#^=B>Dgiha}OZb-; zx~MM>afSc+K=y};)H&1#wrxY0rryTXb4QW={1GD03w`ME>LYk%?Ld%kKgg?Vx$?!q 
zfvEkg1{OSCVGFn~zGKYTu%|?lu@&lSbQz zMS&=N7g}t#0KL&|Y@GNli^@GJdFLZy^A0A^`d5N~Vni<m4208{8LF>cIf`@V@mQPQC!0E?XVY)r{Q++#I(y|`zMBNg-6S`$8mErW# zb`)@*ezBuf!i>e@9S$A30-|{@+@14>LT%GH&AZOx;1(ko>0baTG>eLA!obyKB1?Z% z#s24Him_P*E}>g!ZKDoLfBh4N9eThUhqd6jz|rE;;sCH(6^g>Fi{4HEkaVo0xWjf( zap)&KYw;6%jxcAQk&)PDRZgb!w?IY7bgUaxfdhilS(<(r$Y>m;GVN>7XdO#`6XQ@v zN2p$GwSmBs3t(MG13X%OlO=3hj+dt@(-FaGaMs9Kn5S5<9^0>=W@|@d^HqeN*D)v* zoO=nP`S8_t8Q2})1yyG8*r{PjHIom*ztSdFV##Ak#5gu|?*Ob6m`0vovq)+84i=(* z3r?&*NgBeuHeq=smw4@GO&;lrJ&*fgKgaMExuT;^s28nIQ(zrmOl_PpSgwB);n`L zTX{2^`MM0ng*O)C_KH{B-9}f~DKIsr$*yJV+~#A!j1I>dqs6p-q!Smk>nOTw1z@$q zW~_RP;AUCPngXie(pG&^mPVmmS1Mdrv!dNs1^=&#jlko7${7J_>QJ2}IDhudYo6N4r9${1Y6lVILiqy?mh6+Z>!SuV9tbN>EyzjUH zes9#{&sEz)q(UmLvYbGgqK%MwVjM2>ZAYJ1>$reh1K?_l4*RTelGUCN(cqL>aBXTe zJGMd{^W&c3rL|qSa*iC!o|A#`Z@-B~UVp)-mFwX5;Av#L^cQ}fp2eg~izQ$C2%V+! zsm$@(QxNX(m=n03uASUQV}f(hXoMQsux1kfxyf$aF2O~Dlh2>;6*!-p!05(3mbhyV zn)I|tN{Zvy%8SvY?Y9Io8yAyJ=5FYCro}fL&t%oEgQY(YT;j&-{iGEyA7U$~4vKQq zAg$SptxO1K*KiP%Ia)03Y9 z%dWMZ_rIUb%ZyZGul2XFiqH8tS87HdZv{c=u-|Cl_KIbx&Y%-k`KVSko9dc}lBrQK zdMadaca+j;oUn)6KeULsFA*HF2VL383mF)dl?N)$UF_Q9L-cscapuyr9vw2iu_3>t z?6_(N$E~oh*-`S8`|tTp^e>8GeLOy))4+7eg;VNq{h zxwkgEkfBD#U*Ge#LeBPCbbLW7aAFA7fB+ z_AKTyvYQ_fC}x*N#0zf0SN!{F0c4|_fNOl7;|(Djy7p`~i%Y9xHv1RzDms}Wm*(@V z>){{dzFB}U)1dzRdoraEZHlPuXWzr^U|N*mDbk2@>{!{wH>(~L+)s_nyGj|^d6ybljqlvZ){`t^x2801 zmljxd#L!y0^q67{ z{$R+gAkg#{oYRgr^qZy+X!qsw4W{S z@WPyf-V~_#n+rBuO>X78uugdz%9NR5HkX11oiY4LyK!`OqBU7hHfJwd58-U<99H&h z0xUjff(|#cDQ3$v9OoVizT8dvRIW_7^_S4A;0)BZFsADh2f(Yr!(iIU+pPKdF)m=R zJ>R&Y1%IvY2a3zO*qMd>B#(Q>QP%A)e%+MS5TB|FQ%79Ip!-g^>c9Td+rfX)%0UY4 z&CgibzB-(+E|+$nt>tQ@fi(91X*Ri$6V4vp*k_RN{SHc_SE1t_N2QLCURw~x0=Efn z#U9{aIiKf;TwBR%tn>uV`xr=jr$>A8$3X9${#2!^3U#*US@DxAZ;?EEQ%zvaPUaI|nPHzwko?7}J*H z$p1(pq!p*IytB>paBK?x$yb6a!zQ!ePSLdDtHwm1?I-!%{xigrZl}@<;qbOPQMfZT z+2fUva4NC;L%E()q(iqT5Ibi~c^ThOT{)T71Urz!i!n5RqbXTT-NJl3`iQq>4+2H` zd3gKJMuJW?`dU0tI>sXoH(LqW!+w2edEHdX{gO(X8}+!^(8iu+XTUYtKDaN!36?46 z&>%M%xM;Li;P-E%<4(rhfid~)|siuluP1K 
z?aX7ioYe6BV7l?|0G_*80JaN1)kN(*B6P~o~b z18vELuzK8QX8q#=iKU_ReTD(=x7C4`zSE~aOP0cKYhCDBsv&)!D+{_B1ElgwE+BRI z0W)4U;P;oD_XbalALUZ0zWp-x9Q$&jzuBpS$DKIn`2I~r(qzzq2N{d-MV&lvc- ziig|zM6+2}MDM?tq?JK;*Lc!}+0_&=u$H~d-Gc54ax`mO2CsEa zmt?wxGs%%Zd_vYG7PjFi7*BpGWC31r4YvPKwzigA-!c-N6gE*>;TamDc!+93`jgS0 z^`Lg3hyQt@FIZ(Bgs@}%@MVn~{eBt^K^-H(e3d5~C*|1YKtCa0c$E?|?I6uf4K4*8 zMEjBJvHO7m91Kz<|BJielF4E+cvdU0@-*<%7@mLlTb8X`tLxaH9>l7xJ^7piv0RPr zSZ1j3k^dK0&$p+B@9CkEWuzlO7lk@UW_Hx!FY}VBRpP#C{>NqPF z61E52?s>3_Ie*Y@!C_II*FC(Nl#S0TPq6DN{kU!1Rjkp_g3tO(;p({=(0%_8T(%3x zkL$*;ImNdn{kNaRieIv@R%tmq|7SUuIBpa-=3o@N88;agny!JKJF?W$F5{^J|eo^>4>L=I3sHXr<24T&$WXAe`|+0FG6rM@5A zS#G8R9@p$6s_s9M+kJ_H1D0X9$?q!uy5k91xi3%0^7Khp0{0oEz)v&9>GMvsa%83fZ#YwdWRfEPG7c-Ee#&WO_v` znGexXV%{}hxl_tb{H@sIOkOI%j9wYxEItt<|0L5e?+hwa90+Fb4{%viAMly0!`S!M zK~lMmK782Tk4*CCDZAv?Ry}0<8DfU5BrQ|Ny*;P7(MxkA$BfQnXX{|fdASd-cnpQH zHMh{!%N=Ro0g?Dy7vnC?fE~~JgWN7<+Og;?3mYEGYW3{clT@Ksk?R5%`|iZWs|y|D z?e8!zYZ+Kk{vTK9oxw*}_(8*v05}_z54=GXsNT}0r^5z-lglmGUbq#CZi->kTfv7F zyb`1-j-;VAlunM9(ypDtJTtkK8{fDd=e)_RDG{Hf!t4uZvEn@3YSJFGU)BX37p|%DZJ`l2B+aPm%xVHZyD6P9daYu*K!qYKqux%U~`%Z*p{n>1e#Exap zxX*`VgH*dQn`vd-!&#GDaJcMYe(TIrxPR~pJTXa_!I_U>lN<#`k%tTJ4NPZw=T*_u zc_GHT%x32l=73*u939Kt2kYL265%+Ty;qf%Dp;(Q#SQS8Am0F?5F$ehJF#q z>J{vY;vPgc&J}w`7F{r@8$qUGS@qF^ulrL;PxGeDr7%JNoe-n`$RG7AM=WKQqsPsAfM@ z2c2cgrp3&AYAmU~uBGuRpP9M)I_Sxl(mv+|xCDJIY2Fwove>?lEpw=5(>K)e=J<{&JpTX# z1y}T#vtRI(rVlP%WX`L;Fk~+Rcyd2FLfX*%2D2-y@zu&s=!wmM6*=}`#5sTu9%MTl zeE8~FWz_n*olg*U4>eEc;Ox%LkTq)_H1~hTmTY*)EtEflMbVE~)`-1yXxS!+n=_L9 zM=W9)_Cati$N@yl_R^BRJX@k+L_JS#QplhV7QD-soOTz|S-vk^3_p(#B)W8ulfk&% z%{0|}A0@YGk^gczX~F(0WE?K$b280YYsp1+viUT>v}h}vKq4;xZVma1+`-Cjgt(y9 zl`Y$`nEnLJpyjnGV4S#sOy9g_N0YyD>dC3Fy!ZmIQNM-i#>?T6V|lQ4_iPd`6uKCm z?zC&y4C-yGVKt%)+~ktkbf+qe4LEKA7Jf?N{2MN4tlGtnJ(xq{tDl$&w;unP2}~-1 zX_@|G0{KR2NR6-OP-VFT{JA0r_gccaUSD_AEK?_=xLY)#irAa}^7va-N2=DSCtguJ zf|8?rVNd2%tbi0||K=7`U$~L1`@F`1`E3xZl}+7?MNqUphppUGOs=q%Pi!+p<#Q`> z$F_Ma&8Cc*#opi&G$P<;up6wf_{u)5S^}Ggt3mo3F->@KjJww-R+L$_kFV4;g0g*s 
z*gI({EZ%1Ws_U!iY4-t9{H8!Ct;_f?_XT z!%fR0kn8Q{j&1dZQ(Je@{c}k|_{1E(#(L2dl@zo+6~k7XuIJX|SBnaxchk|si02ee zQtIF!>L`r?y}ylU`D8WQu`dg!Qmtb{hb)|4Kbo4AmtyV4O%#|{$M3foD%yMNIde~K z!b!K^V)S|)n3MFIZ{A=9&l^_LU(Ga7TDy{XTN4}-c?3%ihqJz`^O$MgcvO6x%>CM5 z&Be@(#Ic(fvX+#U!rxzuGu@a3wz&;BeT*G8P4VLA>>Nu``CfT%~hJBqya#9)6IQkCdPWeMrv%qlCwt)hd zWNvhO8EYAM05?^Qqqc7`crZv8^My{0r?d|^O8u$1do7!lN!U-hkOuTSkIys?!qg4j5_q(Xox3YIKT1pSvB4(L-gO^;jDCP?g)Zr#2azn?qz3-8&SslN&XP=>v>L58 z@NE9a3wZqDLLB?C78j{zadTcwW*5?bAS)U!bO?JymFG>s;GPQ2J3n8tKv4ws0zYW} ztvX8Ia9w!+%w^kk#hjnMhT!tz_J^ zEf+8Fd-j~eIg$+$leZS+lsy5ww+gK0hl7|4tL4oUWN@TSHE!M9&M&^9LJ>(owYvq@ z-El)Kl8L~b-xgBba(!^NGNptq`KY<;4r@6dK@oN3bh2VMTk@a~x2?#*KQBx1`I4ui zxi|0QtGY`p!_k;5=6JJb-=m@P{tz%zyT+C-RfaJ?hQjGwfB=^iES$Z8-^s7zS9bca zoaRE@o);}~6YdoYGFOo+lcCpd4C(B-VAfrI8Ks)h|L@Q~7cv(^rZ`E0eyo7``Iqpb z1&{St=YZEVUp8{+X0p!HCF2b_)Q~-fa$RHTltDc~;R9%TP1N?vne=_`bo0(&=K~D3+Gqc?uAbW`iW1B`@>8c zo^qL*WdeVD9eFy)K=6JCe){!z{uJK_?td1zJ6qOrw>piXwd)pZdOw~9mGL;YwuCk5 zZr}xPC-l^WaWfVUg9&T);Mv7`p#4M#EUN`KbwMG<+RenJyQ;37fA7367FiB0*U_ea692J?S1uu zYxwsXm8HPC<_uyA53jPFk{r@As>7o~?yw_%J4|bSj}e-4@O62kW75Kl*g4RIcyC!K zN$%#m?iH}f!|ljYnE7q8UqWLiX5vN@O(3mHs5ZEmjk5d0RF(`9cJYEkY?6ffbuVL; zeSP?ePa~n{f*Oi%-{rg7Sw)4@YDe|j8i@JT|i-5#KKxF78r?E(2VDt z7^|nI;#XtfDR4hLXYClG^j#8h>n#q7yUF*zU5;^v7kHCDLT2kz78koz;PVFl;-E6%hea?BE=M%0UdMpME z(&LVg7SKZ}#7WKU!Edf#pzxp>)OD`FXP43;X~943hw zb_dB|>1T*>e;`c>N8!%99{6sRHt&Cv3#KdfgI;F`3h@uX2^OuqvRa>S_|3<2kCh#} zuZTknp9lQ6eYKEo++G2dhxmkI8jR{O2hMG^g}<`~VEus&Xe(BN!PgO3pXL(lA12x_YpQ;+XPqc_Wu zwl|8aCu!4z3unR7KO4h;jlyTP)3LbwC$1=t5?qu|a;>=$_E4;6ALHFP+gn@VrQ3_I z4D+JOmX(5CuV;d%nU|o_>mFQOcnp5FJ?3!+JMhZEGSN+OgjIj3V1<)q!0#-&up__KSFex*d$GVn6bjbikx{*GaWK8D;=g;vg z?U$lQoswepv9XkV;Sp|rdJ2abY~YgkR6J5joar8kp>K+CeB@AxZLOxztM%iBK}x*# zUKp>>59RTnOVH37`PsP9*mrp{E|qrucWocUKT|XvPmM`odD2{*UK5I!EgT(Z*muI! 
zpCfr&S3Nv4ER60?8o>L{OFR05eA&y~NAB7C4#IL>uCVB&xbN=*>dS+{wd#O)B}GMX zYgRNT<%RNu^IDQy?2dd#$_egwzY#9Gy$8dGQ~1rUL3rn~z4*4IKVFz^3C-%2qNl4X z4UV1()gw>P)2Iw;-a1o!W^Rrn3wj{bYe7QD3>^GS;@n$UI<|9gAuIWQiiUMufBQU) z3C*L53%_N}Wk=<*g=cB0O}e1&H-dj}`A6*!o1yCE$&~N^m69ty@%-BE+}|!8XRb5n z7h_{F;N597Y3PVOBe&DxoquSEls_oS&ZHhYqiEiCGx3sQvbgAHg%GYN5TngUV1Lb8 z__F2@%$7JB22yTiL)aQz*QFG#Y&v4jAmE&QQ_0gEEL@sl$x}{rC6BFLz%fz-Twe^N zR+DSuvA#L{^S&!B`Iv|!G!o(eGmLD>eW#*hiB76^>{@sn-0MFIbKYCf?v@O3LEdcP z>i93>k$)eAJ&D`ICsh~3d;gVDbJ-M9?tYDY+pT5i0%@mWL4&Zi`*OM&8ptY}jCju1 zI`F@+2^L=;A*_sQM>D?d63*-mgm?d*(wXsdWSbsz!it*(X|Gfm_I5J34e!zjmJm z>$l##VZ|RfW_g~!o^2DxMRej#H>JM%-~>K5{kX*3s)T7RgW>GvU*dUh4aI{#{jqmK z0uS@D!_EVgQF37M7mFA&QJyJg{tLwlM_s(hd9$wJ(o5RVU6{w1|JeI5Vn z{t)_=tY!Pcr!fD*6PWQniW7&O$EPiY+_Sr?qBZgqJzPcDtJ`1jjtiC-sn-au=N^FD zp%N~scENr|x@@cUS&Z%w4KsJ$7b1rxa*pm4j5@xLzoo?SgloMN>Appf8Py99E^8!J z=b^B-#1=iIJp3PvdxC%9bcvg+#b2Yp!I%FoqH(Wyem^UOQpGy>-dc}iCw~|SQ)rTG8*7+8(*Bylcd-`DY*Cr?)^F`P? z+D=I8uvAdl+YOpz#S#~wCy#r%j1u~Wu=k({*d4P=d^F+-yquE@?fbi7($ggIy~!t9 zdDR-c?erb5{7DwCiO+@E3lG9aA9e9qYK#FsB=X$ z=$aCp)wms3SGrQf3vFDp{VxpOy^tT58PbmR#q{C*9nrnLBPDB17nl1DH#Wa>9+yW4=8$)V`q@Do3-Z5I2aRPy(zV9I#h z#qsE>PIzq42OOifkGzYvih_9w`z&vRuySV(G`hhbH=H5ejR{=1Y#46%7Dhw&I7;lA z@yrXQ{A#y(xa)lv)}6Rh?9|7R4ZhOKR*_yIjZ2<>u^7UPTbYbHy==CjK(CNN} z!@9l)i}huoSr8(o>sGS*AH?~4yhz37F8M^?72=XAdHTDL@c;Q17>PKz(<#BRsZ_oh zzwja1Bc3qH7ZP^Q=KoZr9o>dn4$a7frS2~=H>8QehwNdiZarc7&k^Ff+pD=lMLb>e z9)j=grK5NEOwv>2fl}#3y1SQYlzMj<(*7c;mO5Yu??rrJ^AuhdsE&!B)`)Y1&WR7k zpCt3RYOz(vUbuKya?Snc$ZforO2vz`lhr}7d>_{8SmVLEd@fb}BYgbHRAjJQmfxn~ zINT}%b$DDf?q7=)a09@lflJDLvZL{D=v3%q+>_W^guWCLgEh?7sZq#R}|u zU^aMe*oF)HRLNe7H9S${Gb!n5v(2Uhq!!STi#IGrv)f#09Z8X|}H+hK3J@4$i$vT1JE5+*bLE zE)`_jQOXI0xk>LCgk1yfQ0%Hp&^RRzuH=-`z)7+2#&D&$D^{SQ%s@Ebqm_OyI7uou z$J5G#|8Xnz#HK^eWHsp++m`AJt53WZyS5+C^7~Wap6xAa3%ViZc#~mCx zD+z7f_3--R1>k#hl5F_-SlDValBNV-k#-+0iaj-Zqor>T^tkhr`ffYHzrJTf+=gVF zaZibzL)77+%|GxOb&`@&q+O81CE^&}Cb@i{7yma;3k`m}q+F@*Q+MqiOkf4wG&02x 
zw?wSi)P@&_YE#(08(a_)Ol}Y3Aty43BIXSgN4Y+t!C`&r+?QSaQaA_$cBjF49W_eN zG^9W0a^TBx72;VxV7zTQSFDUgYlW8hLgGK`oOXlZW1fmZj(5Zf=flC*WTfc-E?iK3 zSO(#<2E)qZVQ}+(JUJCON@w0{ynV}d4EK6Z8q2NlziCpx(e@>4&A-U!qp56!E4q#7gGbw!(KJ(Qet+ho-0Yz_-w2%~w6jZ~(3xMs)ZJUr z|LP&$KhBt9>#tML;5rDnv4*`rDbdqSQF!=M5B!nok2kZwU|8p3EDbln(S{vq@Z^q| zUy;i0<<|Us5%^Gil-Rm&J*f|P26l=SKs{&U-}CGFjl|{7jK~oGwaewHMla~th!9cd z-xJ~V0|hh;9}k{C^(hX=)9M#0+|ExGEpuDQ{;#yh)I*a$U=q#O@#m_(37qrVl7}j+ zNbQcq*LNwQM{25US-x1lI_C)7i&IuS8gmzgJBS5c`$HeEKZ5GwJ)o|#!XeD9k7Ck+ z`9kdROzKu~8IG^65+D9kz<=Q;P#%>-F9!$Fs*HN!^Pv!Un>>mh~I_08o z>HzAPkO5tvB*1G7qHlKYobl=rxDP53s-io?zuZooov(_jujWGQ-hSjaawD8jn+<*R znqlg~n_~Q#SW4?XoSQ#gr%j$K!LU>vizGi;zD{TEc zX9#s>HFDLFPPAmdAzunn!N+rSK)rXZ=vlFM)X;l<72qL?{SHmQbFUM`3qAv>Q>r&V zpYU2}`FI?x7Mvgp$NOY9Yy*$|T*74rov~Ef7w&O)B%Z;?pwqXUQ@xaUV)4I{=8^Gm z%QX>wl6yklual5+<^`N@ISFFpZgjQVgHxw==ee0L$^3U4*=MElCB-Lsw?{+C&n-iG zb{t2u%2(vuqrKw{<*&T-_By}|zi3@ni8$g(u)4 znICtrpl{n0Mw zBQ8|hCA_D@@LB5-8QXSMsJ$u%bMHt}cG5)g<31tK)`(T#dZ2Z|aM``iwz##v1Xc7@ z+2!_k>R1xZOYGC}--~*|TiW+`UX;%rTqC)=M;PXe4a2_c2vwV}A(0S%vZX+GJDu9<+3Kv$fuOS+{K-z%!)xHi$wo^b}m9 zNuYmm1KrxKAm7esq&}KD{waHhZi(@-x#nx2;-)HkC%W>Kf-14hLxx`QN?0}An>VW| zs5RXPjg|d5XQKld#027!m@eG9L=~T$?1#A)T@<$Kqu}wK2*H204ix9=$}*Z0Am-CuHcb1LUe9)c)k@t=M>GAcKh#bN&?^4&uRsr{gzV&tXK zs4DrXmA9W{4>603KYswN35}A9Hx|EK@f4NvQu#y7RjRArhXE%$f?7%rYnR*-mB%}? 
zUUV3V{`2W+%m_TRcO&`Qey4s{reWHoHL&d}(~&)EIO}bD$J?e(5V^gn7!>De1D9z4N@E#7R`td8&3uf-?Z+o17X2+To%@*8j2sUX_LYJgY-FMzfiTzM@;&7NFI1pkDqLPPc5-U ztf~H+uP2O!!r>XnCEvxVqn`;;V@cL%=t`fp^F?=`<@9T(2Y!5P!qrNd_-Rj*7DEUUhE2Lsa8NHpyCz)|TnjSlCI)Yg}^W_-}_whAp2HAJ=M838tihdmk<6fmbVN8>> zcb@o#gW9(WV`3JtNAD`(?t;NoJmIrgG`$BqpBTsYn*8a_E-kc$>B3Gm3-(_f!0TVB z@!a`Y+^^xSsQFd{Z9UJk&h$NKUgAnN;j`IVwU)ZO24TVNqvUznlY9O8CzwC9g}YVK zxzRI^x$umYCOZV05m2(5j@tr(2J)V@K16#Sew)V+|Rayw1(lhRws?_ zFaD2mKTYJDMOv7jcOP{11BAK%bZD=yHgDWJ1~2)xLcp~cC|aZn7+yr`t82u}6Jx|5 z1^?jI-W)PCJ0_lp^rPD+Zo_H$MP415}VsrN|OAmubwC843sB~g27jqob_1t^8az}0K3*kF5w z@Oh*L%q!|Hrp{6UYs;a~S>u=7;n#EAp_vDpKZc^wnqAx$tczpzrqLhWL~3e~vd05A za_Ql@_~W4hZq10{pT@|y4U6f``;)ZR*Pq(dO_>keC)4Ri@Y&@Snd5__Fyf^)>R($* z<>}dYqw6kSeqICXiaN04vQ6xnro{6-BE>c8eLbULVW6|%1kMJ-I%2SqnFPVC7&2Dg$*T?O^8 zdQYvEFCa*<3PzkvmOPj7FnID`u}0#z;cz1Nh|N&F;~wc>_y=>&y2`u%s)1LlKR~uk z0gX$YCFGw_<=2mcNp*RIaPFcJuSgGtuTQ-(%_vUTcv?@)Cp8XSc33>Jb~}%WS%t%H zE6Kd4)d~Ho4#?JZJ1j=M&Js;MwvybrOt#T)1~!~~CJvLBO&62y!u9Qk#X|v-vxTn0 z2c6?k?NlfH{@5Sg#tsM9@y_VA-yO#|S-`)Q7BKOp7R^!biIerGa?rD@!knsJFte#6 zua4OSo1ce};S7D0dp?Ds-?X`<*`D6sUW*ULrlN2!pYlwV#36@sNG5T~+6783_rfTM znY0gP97tq~FiSMuKMb|cN$%xWHsrK7QfTjS3y#kFMc%Xa3R|A65RY`xVB^?AG1=}W zOdPTl%7gu=SMZ8a>&9Obx2^8YGo@>HTF8f!+y2nkr2}MLs}1qBsVO?#jD<#SmbEQi z3!8uKq_MM(g6FhVFxvlTNp4OaJlBiGX>%;N_%Y8(i)a zhJ~$iaMDf@raGR5#$mVNYMl+beAmEc*B@-P;s>`(A{=*m2^u~tWw-Ftw>Uhc5xp5rY(7^kA*I}y3ReY+l8vo2r!P$7C$d6^Q z(`ZBAEF69O$EezRGaT5e%lm#r3YvOTnf~jF%h#Tg)tc&p^VNL%+ouiKp&B+X4}$Ni z$GEEd^O91XXu(#0#*cZQ|JpPu$u%Pq&*8Mm* zzeT)wE`wYRyP?czuvjSdV_#?n5(FqaPXD5dopzak@3u=+C%I&6Mrz{N7z4-eXC-Eu z(*|B(smU{py5gs8zbVppE`9vb2**}f^1~m;;E?>siFSlamGisV6!JJTv{gfzAqEM-&+g` zMt^0luA^x-*HZDDUE<3KcUqR$0%{qfVceEEg3BDqeLgi(SUb5$)X8d)4-M*pUf$ne z^Yk<-S|`wtYx8LOI0cm24&rH5Ge9Y!1N!#9#fkwp_)(VRdtR%Bxu1+Vwj><4ov%U1 zR~=}v-edW)VHwiQzzgzCB85=nWH|F5fyZKXUN7auGnWLwYxB++`fC8XyqB_f-IAc4 zKfon5e{9~b!2x|^!7wUUw96LQSSek8e%vs&oKzrr0(4=wlqs+cse?^9EE-Q&@5XF!~00ije&ab`pi4nnGP_x?iH-kOqFlr?mWz?P)N5|=DWgu 
zdi{Z6bL9Zk>G(saP4qd##0`0t#g~ue9IS5)7T*{_*o?8opT|R8*Uh`AIdxKbZ6@jZ!uv; z5}7pBf%WZgl>0XwvWD!%d(Y?bon8%~vb=;wZug}FWA=)&13g7vJQCI}3I#dE;M2jc z`0>k49O2oD2mD_^zj>>{@078WdGcqw;6Q#8sKz=*`zh*qHk=7rAS!Qe$0j4x$>eMR zw>%ESu>a?@Pt1oK<=t>;tP_4QY!LHmpHj*D)fc1ZED@G{ek6OfzdgOG*e-vPzFM}$ zM}yvZ2UGv)p+a#-ZQfnJTb^-n3+&vrUTks-1jpS+>G=LqxV6WFFCDzkdjAIU?jb>F zqw0rZ#2w*Ldwn*EzXeqlfwXvyhnRXInA`l_$+77g#YlVJB~RpJm)k(8ss)gpv=1DY zmJ278N5Q`L$=I{)3oe4eg@kkJR5M@~1%?kNqcLMe^NGE{=#3Q}u-PDV$w(2} zjXI2{o`&Js(tp@G(+0=gx=7o!%iu-XUSYutscsz-gF~gAldae1!xP&U(XZkEbZ>HMZsC$#iJcqN-AJ=dNIg&!^HYos&2u$xNFRSD3$c^v;s zn8_E7Zb0wfUQoGt1myHlA_Gl-QX4%?*75gH@^WOEZJ<3qJ?kXuetsd0>#>pkkv<;! z?Tn_Q zuJN2UJBibML%iH?n6PQ6CGHrRMT>GChy$HgVxjXBSo|=RZWrGm*-v9|vsP7Hx@kcR zQj~e0?SA4$XAF#2;#sEJim(^yczEh$Z2e}Z~0!oTI0=NS*>a_V$k8pvn|}ud{`|O>pJfdvj#ZqhyMIJG&~z?Mk8J zji2Dy%1RobF0lGI4MjrHFfg_(;Dc(t9jlEbzTS^>Si5Zs1kE3TuWSb@77dTZ*6<71 zX+%AIT)GUeKGgs-uu=HGA3|R(`~Z$f&-QlDMNkfb){H%TUay6BH@K3=_d#?^WgdQ7 zItIr2Bl;%}m)L;q$@a=zao^`#XFH7oBJq0I)C9u3D z6OV52m!GMRfHl8o@l_{@N#;Kgw(N7@$i@F*qe>|RzKN8)F|jOPcNM!1?9Cshcf@JS z>~Oc?DXOT*!soxwgRAW=Va?26;=`f)IBsk=uKV{6Ze^D6VSiO?BoKa3YkJ4#!#Wv@B2Rk)bZ-I6Cr*K=EV4R)9MDE~TH?n>o3WT!zhtvX~m$p?UxVK80d?gRWTKPymzFJ@G<=l)VW(DvWJ=12aBu z6SSmSvpnx8RrMQzcCWe#Q|i`|VzUc_TW`nR_W{c{?1F@iuY~U{mq7V&N7NYCi2gQG z)>>&N?p@tparan0_VVn{dj@FpJ1Z9sS*q-K_mdqzzm~y1KjUa-S1IF*!?<$KRor}L zH)cUG1@}HmH&XuJ6Q~kfCTxbCg&TyQgBQYz1{um+ev#F6Gd{589<4DEsQslCFhS}$ z-rf96u$Fd%zIJlr_6yWt{iA%27R&K-OgL1J-pSLA{sWKxy}4+K)c@#W45p54!gv>B zjw|j%Lm#&a*++kq?JZks^a!Ndzyt7OMmA43{RG?Rl+*CfDER)(l;tZOlKM7koLzpF zRM#E?waI|uA0?bz`i-0emBD9E5p~e|L;I{$aq*K}wsLOaZ4W-vhz}vKtw|Rajn6~N z^In`V>9TYW?L+&2#jw-#Xvx#<2}%aBbR;#DQ?k^r8}rzB{1#+-ET*Z#Ud=x+N4= zt;O)8me?}Ll+V_R(CI=JB`rv%-!&QHR(8h^&3zdPJz)G3$&=A{9u9uA6>=_j;HqhM zV$%a$hpA!m0=8uQu=QX(Q+i3jh8-$+SN8y$O zdtu0#nW*#10R`LJusg2|7eyvh{mm4bWOPPs6}`n(BedzK#EILWaUGg(9i_Hkd&HVX zGp<%Q6JnnCp!uWRap*`7*oKz0 z6w;#Cc4%vshqT^Mu)Fz4!6~yBaZ&wW zNR3!bz4{*&&l`^uQtza}z%$b@zG$R4=GsEkI`E2=HqFH}>uoS)(8^@=zU;wLk>-U$I|d`9dKa!9?0rY 
z1idSD@aNg(5I4k?9#630whxnd3VI4hB-YvKD{4F^uQL|fNHt`E9u~}856|Y>u?y=s z+BkgW?wuu;#~^!NyRSlU9XL_kZ@&#*AF3;n7`4=GO_?m#{tP{NtAe{!GU>s^0iYo} zPiy`jf~it2amlb`T-o9Qi!W>Nv1Da-Tposh#`Q3uo)wCRq_c%6dX#!Tbq`N6zW`yTb3lEf(vHCW+vUhWr_&ikcof136b zywWp5nw#Zu+p|zQXsUzftQ{fb##l_fw_28T$PMSe{6GitK8h2zTSAHMY)smAnM005 zN*Nwknj>>@ob+WTjBU%5GG9A{lDpOLxyG5B8_L1Tz5}>j_9MHWWwJZ%N>KQ*4qvS8 z$c0^>(rWiMNP0AYt|c9%dbcKQkN2s6*FzkU{0gewK5&tA7VUR#2svlWz}gStk`FzU zV^5mEsV|3xv-9(X5SLuqm8_3~(G`ACISbsIV)~>%+UjFcthnodE_D^+WeYFV8%{IhMFMpyWe{ww5e46pFzGCbiOE!_R_Tks| zLX)|I_C!YDhezu$V&OabJa-N`_jbn1s(Yd9eREhCUVnbTwOwLPl0BD{EXDk3dWt%~ zn=<{QJz%+Kd;T7$1=+oi!=o|(;RwCMV&~y~VS9c%g~gOSoFp0u8`t=e%ZSd9{i!Qw zb|mudKNu~NM_}dlGB%A_3ELl=a!&ivH2Jv>T$cKjW1Q5m<$WewUDALl8LQdXfFOS}UVYSv9aw788?>ljqbx~NQXa|oA#YT>wge#36q>4$I*DI1>f#6oG*3n z&00?rsDD7Akhi!E7S73s@w0RAt>t2L=&_!^j*aJImOp4>=R<7&_Bm!~ISBWqorboF zTZAq5CSY-I6*%-Pnui>DjRD`f@x-Pud{4%#K5>xPYLdofomw&U(H)^_);5|{L~Nus z3;+Jqhd*8_cquo4W3AI9uU;sBf3Xt7Y<6Q+?g&teXprWRhj8eOcrwdWM;)&KQRz!k z$(-GhRJy@kSQKy`>!TLoh`UPoV`@GOZgauAju-G>jwfw1t)}l0d*Gl{ALeL111%d@ zkodsDlWUT5f{#K#D3D^U0hG3t$!+EOIKa<=tK7Y@@PjHgYj;Jvy;phB*flJkn$LSo zbD+z+IWV$N1qTOIP>;$s`WtjYcGzXsNcX^E76uRGdmR>n|3yD!zdmFf5KWzYo2kge z3I_dEf@f0Q>F#-j=1tsy7ozsSyx}G^XZ=rkf|Db;IcngZ$uhB*7ejwfBYBUNn_-k` z3|j9@#>rA0e6TJYSBxLbZ*`_o2WhXRZ|gDYJ9#$@)KCXgr@g|qv$w=xmu_HUzl2jC zc44YpU5{FDqpxOJ& z@XFi>`Rn?Z|L0R+{DLBC`uCVhr5a#@QU>h|knVHcEGWjm3x?}zVcOP6K6lBQT}_iX zQ6*aZkk^&()mftBwP^W*f=a6D(^u?LcvJGY?1B?rHSt-dJ3IX6Eb+C2@!h9CG_GQ&KP}Iq#=_lC)}j5j!97evm5K~uYtJ*lVJ4gI}~^RIUHP)B3tCx z2d1QK<(VNH@L^*$jO+d8qPgoEp>A=L{QX&`>@5Sh-}5+F+TrQQzM6J$aZV-b#wSZG z<)Lt_?hI|*(@6V8Y3v`<0d(vxLHx>O+TDBqgB09>2LKdZu)jzIJ!+)Zn%3Ttkbt6la9~8;p%jlcA`B` zi1VQ?-@lTxeG{zydleSksDX+1w76;2Nh<5nS6HRi5kfpC$6k_~1a_a(|mYPadhoUHb>L&6#DY@{5 zPJlsL9yH(T6wTb4O*ihjpi1LK{_^f3#d~J6lVxXoy|e)OnBEqioq12D0i9&UUZ=>D z>|tkx0WDr9aS$vS7Ve7`mD^1Rlhco2mWeL-cZ!0w1E%7-)?ch5vyfOZqv+Jxt~e*8 zH|{xQOhvW@kYjs;hB`XXma|{s@6OYrp<61+XQ_!-x}OA}ywBXb(_c7NS|s*HNAb!Y 
zEl$q91~(7pQFvwwndo1m4<1GkWK_!clxFiZqZ{b2YoTa)kcGF}^@3#^r9Jb#H-u+j zrYXiWl%d2wzz(gkG(4^!j%{cZSDpJ0QaZk)n}sz{65@t?Yb6%TfS#OxxSQyekW3nB zox~u+KY+FFFyoA&n6p9IaY)bwu$n%OZzpQdjiGnoU{`Z=`XvXKx)s8Ki3OmneH;xR zo!Q2n$s5xwrkg5|9&%YU;G#dz-0I(dD4t- z%CPR2l>OONMkn=S!EM_g@qV*Egr3wDQ=9rI(siGKdFwey8I*|42a;jSkegC{u@Cfq z9|fz%d2m9m!7{IOFq~k9y|*dzwFYnSFU%;r|H43=-Q4?de zLh(!MU7UXEDz)2rfYwU0uGLpV$o_kK{#N{nYJ4Z-kg2g;xicJ;a?;6pRSOIlI9gU- z^%;gfxWw+S6X0>Pv3S}3Fm4PVjGsHE&>i>5@>@N^@!5nt7@4sMFEozh{6b|Z`}K#O zO&W}&b0Lw{U{k;>&K|df9WK71UB|lM@1HKxuI4EY zJ-JdykKMrI_PVo6#~gHBuIzXq?S#1XzcFMX`ISO?w#TSEFJ62v2n_P(VyDV-xy{+z z_}IXb7roF>dpD;Mo)t(QZTjgjyJ6}+nzn;8&D#&fFFNbwF4buM^MUQBq zrtApJyODu!zQ==~QYc$l4dLr*(pkpS23K0|VGY9vxVwEjoa1zr{Z;jFWLpZ&(-_H0 z1N3Q%O#*w}{eV|zzXR9xj(n^wf_FBzbNnJ-?7D_N_Xv@6DMR>NoPKQkl%_ktNIO zZ$ievYFe{tKdv?nhv~+7QmkY;Hmf{>hkMl(OY5aPgKscxF~5&L_P-UnKiffR)gN)K z?>Sob$^(+;If}_+&I#8~1kelBgIuAWE54P7QAAoh!BaDpM$3l47lYpXbFHPLa$q-V z_f)ERRK`P(e{xxVpaFd{+sI?LcH|fFYL3p&MUGsunevai&owB+s%X;!N* zc@Po%6nArs&7Hu{KCI%>HT$T>QQOgP#9@h(8c#!Abor)+Cd^|Sy8k>y81&@={!}dF z$xA!a!Pouyb-g)G&HW{u`L-Ce$6m(E569u9Z5!xf;BfNovIX^SOZ}&Xw;-nDd|6n( zbWSsIXZ5Rr^lC~W9n`3z&I61v*m4t^E&fc4bZj~KXu3SbZ8(ke{UW}o9E^51T{viQ z15`u^To`Cg&Cv__yY*S|VN8vDz>BwZW8G^?AAJ~>jWNZT&)rbfRY8k(UZT*O`#8{H zJWtd3k1Fa9@m2SBil%aN>E1mFD;s6ss}l4dJxrRBAowI&X%_W z#D=ZTyfShfs!dMi>fhbivKO#!KhT%HHWwfN~KKyEF$DO}uN&HV6Jmb=l zJ$gy*hSHC)V^pqiAhlXJBe8YgPSxi`*G|~!dYa_?*o=zPSIMXRuKe}2!DtyNc}A12 zgU=srdg;EO&}%Q~8uzE0FFJv~>sW?~UKsYlkbUb7VdE)H*zd2#9x-FE{LW^|$o<5F z4!N>Ir5qRfrBLfcb>4sH06d7E%;ncR(e)SG5C+zQ!69d8SsX<(qDDYrgp`f^@0*w! 
zvkQHG%@GcoCkd@LdeFTZtrC^Abiu+fh8vg4Wj3n%U}vSynNrTwFwjCAczg#;O;x6f z26t>bkk0*@D@L`}-jO+PDi+OK=g`@LdvLlzVwkvxieGM~h?Nl~@?}RKQ9#yM9OUHz zXXk0+`mRVxoh6^v(Ox)K{~_IZwSem{J3`o!Jo!0~<>H8JHSV&jmamrI7wQfKqVA(y zDE=`ScOE}URAbGz=2gL@>+itjavEyXPUhp5{is6XF68H5koNn#@#4#eDSX2NkX5;( z`Qy#pCiNPXv-W|CP9(lbaiY%>!%<`D5mEPXJ=tvOEsCLPdH#^B)V*mgd3>|!mnFR;<4QYnE0cR>t=Ko z#wQ0!b!QB&@rj24&!%ynmN6-f6JgTwzFc(TE)92yg6!_WV$cX@2tTnJGk?v3d5^om zU+Z*1F@Go~&p1SbjQ+rFiSID(%4EDP^OBrLhcR1y7;8?N&F^m!$}~NNKbJf5kPUx8 z2Oq$vnNuJ^`yS46Gv`NXyNZjKtsvjg(iuFm1CMu(p}husOL98~((#D*FuM7Jl;@RZ z?URmB)(@$ctdXu~$wQtXF}5h{*G zT+XFhNo?F}jQe62;{Sz~UNFOI^4q;kxHbd~=r-Vlu#&FkJ)48r!6&Xq;73gwO3Ho20Eelx} z2hC^R2-zR;pj@i5Ney!O;{>*InyXGm3dT^dI7n!2- zh0Zv-afYDt-BeVJ-N)@td-9i*5WZWZ14~?E*bh@=BYW%1KirxLD_V-g2bqb~$4eh~ zX6?i|3*XY2O_%6LW*Tg*+rSU~G+@5`8fZ0M;}(lzL2dGRVZo!Nc*W`qf4&+bj@asi zox(Sf?zmOBaug2Tyh zEKEEFXt*1dMODSdHKFw5U?r@Jjp8-GOX&2^Z*jPN;}NYIq1|$PS)+Am>_Wyl`697^Y21l{Y>N?{6lE|G+GFc9l_tnd;@N0UZVN0 zo|Y+xGpP^aSB9U^roSd8)}G^=KU&1&9rm$VOQZ0*eghWV@s#x$d4b=r?~7*9W;pxA zK-B$jGiHA2f~)Q(3O+$$)a~DF@H;pZ^X9(;r8QZCQlq-$8N38-PrI^_pOneo;K2KB zhM?u&eta)Zs@Igeu+Wgm-A5HeNPDIs4DKAXzQL|j{zPEMLZB!7N{7kAmrmmZIxALFO+^#T9m=)B`<{NF!L+bR`B zX`#qSMn;|cx{i!wlx!)=%HB#=rL>1Mv`|8!L`Zb*>pFxeTVy03E0OG!GJf~>_wRW; z`sUb|Lf^EAc{NvHT|o$JdUFgm&3-1Ew`2>9%6=d$oh@}Qg1bxnfs^R!IuAG8_ma6- zoD^~;4X&5^U0As`jGqimkoM@xpwEKIxH?pisuw0g_wq$}d!h==yksT2Jts*N_Z#t+ z_tVk)&k3-;Xh25~c(c~}jzavjDg0-w9IMt?V^pLSuc|qW`Jwkgy|y<#sCQ(ei=~2! 
zv;({HeJXZp_m@&StAhEqG~un#ml}F1=)sJ1p|om&Fr?2&@nZQB+8XqOmP#z_MFkdE zcqkR7#L4l~h+WX_-?fqp!R_JQqg1q${Ifo*hhc5)JB4O}5`%Ff-b!yTaoKu<_P@^L zopuYxe(VJ4>XSKXRSG;ga)=7_&%xT76bN|s6d$^ch3-GzQj}dWe3;T5{5;L@`I%Vg zUFqRaZy!k^W*dbp!?P0Ww?X_`RUtGdpXOxqwJ_s$cj~VmOg4YoVQarUMSAmP+A=r{ z=ay^YP{#+dA$tvZee@HyU-u2h|6D^SZbg&tlNa#Sxk-2&lS(r?N7D)4!6R2}+bc}J zoGaU&Gnri4NL?Ds-BKR1PzY?EBf8rjrn`ej;n%$qBk+1MjrLhX&5Aohe!~`aJ*FY~ z`9FnSLu^FFg}w+IPII5JhRO~;HoSj=D!P0Qy=eEkNs)2RpWC&6B&5{lb5Up{W?L^; ze7?T~r(T*$eZ@11-lPuRS0>3y$M_0=LsEpNwl!4Is4CuDvsBPu|BTv|pHtYJbcEXr z^XY4F9Nk*>jaE(S!4b`~A?N5LIHg#}Ro^D_kFX~4N;8AY=1FwN+LsO%y~TxH^2I=7 zi5>Z>6VJ_@B)uOa*=lbNR5{K=rS(xb-nIp=b*;gQ(515Z&a-LFTVviT>xC<$?@+bZ z46d8k3(N1nftx=2;o1Xp zEa|C5u;`T&I`vPc>(YCE`N4+@m8$~o{!~gMrCr3=2PO#~`%B5 zscW<_K0&Ct&{GuWIMZWmZ**;$D|Ok^A-G2_H7y)2*1nII^)GM5>jU0$Z!ZF9e6nePG6i{t$G0Iz8Sv7JSCn(3#ymxX;}}!FbXl z9+=uu{^NloSQR~lYbHhb!9ESAw6?)t4%MJ`F#_kOhvE)6jApx?_)~r?fk_iA8&F22 zhknaG-sy(k&0X=wAWxj_xR11-Jg1^<`IzEdP=d$3VRV!)#9p@*J>#qe<&Zf1*m9NP zkHnDwG+)%IIKbYYZi{Hm>*uqPMPUx4>fq33EEb1&}OaC-w|!{6ZJ^TOYzsT4oa1V*W&A5Dl@IsZ&cDOXv!X^7RLu9&uhXFn zy(gD=o-zeHAA}a$B)XVlflcp%7>;k_xmwFPE7pO^RgY1$c^BOIRG@X`O%Bh$GmY!Zy9_i<_%pd zO%jtEx^cAbJ838F50C#i01vy5hKzW7U?~%@=ev{}n45$S`=yBlM+wFo z6brL_1}m>=+l!{2+2T?`0mqgGft&R_QI`8vaCM7-gx%JHz1}^=FPS%+TB@Si=XV^i zZ6JP|;EyhAD(Ead5jPF8;g`1uOPe53Xxc2XSH?c07yAd|ibdVHOK1^&a*@Mi`>Pz7 zH=nOgm_vVN7U8Q8yXc{XFW<1ah1zQyabWLEsZY0%y1nRzcGCaRj+a~Ti~B0fHod{C zG!99O3l-&L`-8aW?rDm+=*b#>yEtQnB^7D}!tx$Oa}EZvP_P)S%3`^pyEksWeSmLE zTY^hw_u-Kzt+0o&IdxT4Qx--OAGv7G`t}##Sf;n=oT?7P8^^NVoR?%YVHjRKvYb2J zbbyL~2@5WRQ6XuTkaKHJn zXtFFCs>axT#^r$XAGd9Q>j z4cPyWfU8G3;L*(kz&pwXwndx}x(CZ?s+9rFjO~uKcY3kakIAe*y9)aB%7(o*XCQFn zMoI}f3$M!CW17h-4BM-N@u_A|c&;;!>D4IQzw}bfYq>&ql%}|3ptjQYP+R#V&3%Hi z_BdL$bTDRZI16JkAJC6?F*N+{NBAF~K`&pEPFEFDOuvB?bjbo8@(ZD5S`RR7SkGUz z$Mf_X4RH7EOLY8}4qN^!1x4XV?m00ER1fdsr{^{4%vGs3-)Jgl2B!i~Fw#_a`L7$MC5T-0d$nUWmh4HvQ$5W|{P`aj#&R zw8$a;cfQz9;uIxM+e-@?2XMYpl?K^mi0Oy5(amoH2ai6ErBAA0$?=z4rs%NXHU#BuqQR$S9PjxWCVo9m<3|m{{%8EjFxrXTTKoS$ 
z|FOdVGZ)S+;vF`_@a3dx(6RF7)HX4ST!%=$zwPmRDz@uw3tQ&|^JJ@OB@<_=@R6lmAOIFoQsPg} zDLMl)W_wfOwsT@|p%Gqv*jBlsn>$7~g~73GLq30MKNV)Yr%R2|lD@nR&AU|4f%PvG zucWT-hfzhY!#?VKlTFN!si6H8i?f()1UUi1*re zLIaDV;u7cEFmKaddKoki<~G>zI>jFRG4ZQdcI_Ah?Kn?E|M`HvjiKuVT)X}Mw)ZTdTb%fq@#Ox9I!LDik?({>A4Cl+E|(hC?H)&ZOTXz=6b$>jbv zoySynqc-`eSfyD*s;ogFZGm0dBsq9LJqga!%!IQ8u7jlmNPcvyEH?cK_gn6RCx1(O zS(ZM^0pGju(q$_6quySa>e`)sn{Nt9;jgIAg6Ab0>+@my@5iF&jx3mdyoF{hP=j~3 zRM_Zh8*G3C?)r2Eezg1!dGj4n{j3W1+PRRDMo+__OIe5+nIKPzqMnD{#nbEl%Dn4R z#Zf8^)KA`zlCw6EU)mwsOb=k^u_jQB0(Mi*eujPafgg230!pJ5Vu}5$2O)?FWWOk_!9OF)^?iD2g8Oy z=>iX2zj`;@J1@i=Qxot=kiP80w@TcU!SG<41GX+SVuiFpb})D%RexRrYp$B}vDiMC z_fy*L_!J7$+qGl+t}4`Nnr!Vav6rs(U%L0wV4yXiVp|RM}z0VXteT%5d8fg z8OQ)^e0SkOi{Z5T;%ao>+#X*}Hs!`G7huN76IAg}Vxc-egsO3W@l2KtqE#CV^zM(5 zPYdWx?N7mXi>mUD@-W|iuEx5TGr-CEufwP{Yk5-ddSTM}U3m9JCf<~ChuYnqqm$tl ze7nyQ*N+@5e{wz>PWCj!#iyl?^6n~X{wo8Ym@Sl3oFZPX%jV#^NFjXm6go0I2`Af* z;fXu1<0G$&)MnUWNY8B}3s-X&`fJP1*Wx&|4T_vq zi~d2R>$mq9+rUy3lit0?4}#lgbk z^)S5b1drQkqr7jK$2vYv^zi9Px|Y`gsw3`Uz}`KeI>8*09B#74-GTBMR-^G(brfVr zreOOuo-JUN;b&IR4VqYw} z{XK`BKaa-unMxR1_Y%Kkm@6k;_{^z~9}5zz0ykZ`MyuNYq`MB*ic*Q2Saa1(+5NaJ zKTjEjI|}Afmn)-5Ykz+%=qIvb|55ni_?(1?4%l|q|FVIJg7QEN{kZJ_o@bMwbh8sC zVqdd9eEet(S56(uA7(G$hU*fz=wPmR zzw05|aij+o8ktKB)QhCEtsMikp&;j6s{1W5^gq|q`(dFZU-p3}ZK>j%WB!mGnkMUi zsSOEkmNaCZtMJ3dmNpE|7O!3Xq{x?LQ{NyhdUp0a^%&U?+TXNf^$APh&s+<3vY0|= z6{k^*7kT)Pf7E?h2JP8qk4JyjVb^qN&(&Zx&oF&YqpdZC_g6G|by2+LeHH`YM?G1*qR#>0K z;;rg;G{PZFaZ_UegsqXzi=Wry$HNza=06pmOt`{v1+#eT=(&(soK9H?F(u+vqtDo zDRA~-d*Q-`gt#HtE-JHnUqrY**0KCgvClclb|(F2+utxoOc_Jcl~ z5MJ&Gr+Hen#A2R>g%vL;CnSS!uZl(6!=reL!5Gna zauYTC4W|05A++b?K%QbzPL*#}giyD9a$9~)7E_Zb*j|r>sKJLgFlr-`rl!(dt1GMS zHo@;*C2i(nFR0!7oicPAdCj6OoR+eUZ8wg#zqfumPMji4Tc9Lf%RxR);~W3;^!u^xzQQffuO@sap0xmh<_Yy^m7q! 
zPI-zt9WDs(As$vtI)@eA2Xc^&LWmtu1*`U0l55*%V(bt{tUR&=O|;`sJhmI(w;3;b zsZE8eAD_{dx=~c58A?Nvm*T*oW0Z3S1#LARm_!bZ_oR`#c_#?!QVN zaZ}Fj^+z~hY6syQuBHoa70|l-y0l+&iRQknflUrZut9l@Y%OXj{cwPgF&$}*o-T!F z4`9#6!K@-N&!lMp8NX+lD=!4dyk~LGV@bF~3Djle`Qi`D;8-sP$+kdHQllZdi=7hUS6F z1wEKHr!#hWwH1pNFT*QdA81{U7B;?HLkD+<@xKxCA>idj@zil=Iz7`#%IKb=Wh;i# zeeEo2KO;&U;Lu*I^R^}3+9zV%0mAha2I7C)5;4;&fgRuP!Q+#ri8HoLz|I@|IqQWA zk1y=TXS}7px|)=$Z21qQZDJhqUSEE-axedH*D|kL8m7lIu*#WTyeeWp?(}Wu&C5$r zI^XcX;pN=?aS*QH6#NrrBENl1kGrf7#@-K3p`5)VZd z(ATzKP^4CI0Vi@ncP|N6a|DxIL=yvW|_efZ3ZI0Sy z56OE$BqgrtLk~~N@Xmyua-Y_27%-@Wiw1ddK}M2z_3~3Rlg@rgFQdS{eLMur4x;}S z_Ct%3{doOMAh%BX#Q{|V`E`3qw>3`&mshrU*{o3va6OL8cMs<4-|F~j?lCkfO#-JA z?sTVJDB2{<;hSdNA^p);?^B>XP8>aX($Phx2KLwD(vuMF?{r3OY z0Agu3s8HCFVyKZ3m;Y55a}(W%*Mm;P3qsD-~AtT zG|-2TT?RZY_b3KePUE?C1M#iSAf&qXG)}D@XJ=k1`BCoyE4{YRqHnuluh(c!i7?>c z%()bE<08H?dsnYe zHi=)1NR~2RYvfr*M__)x7r1}z8@T=5pXGD(Fj`SB4x8TMFtuhd6~SQIc&7_jzjOxQ z^Zlg1(-TFzf&BP;E0j-~ijQlYL_>#Y@%u(&Zn`jqBcAjkt$)kOb#OQuuYE#W-)!K# zht~LDmLY$bw2og}NsL1EOukXO0GiGo!~?_U3nPF1hQ+zTp+T5Y#l|VtpEnn75NjbW(z z=nxw>pA=?|E#)Y=iQKQSfco?@q7G|;w@N+ja0`O6vq}6XwoDv5NtM<&XOOeUB3OB< zOc?sRn0^Hs(I>UBoWIf&$DMAEZ#S>PzTJMnxN!oUDNW?OvkDtcMzlwWZU1|OPU6g+OH4JZlOf|pUvAuIgzWS~|-*z(Q zM|F85k1PRQe|7Y}bPHuWz3@r@uk35^6mr(>XRlM=APJGKG3pQ0^3KC~G{49nR_F_?4lGN5#4)56c1Y2ATc*^@qF{Ew>{yF7=`@VdD z-$T`LOw&Y}`N$&}y(N!R9@tXMjudK$T#t2zn%t)SG1SIbY_2!uPKwUTU|s3GYSbM& z1uwxN2i4Ga;T=UUt5MwZt|#xU`NjRGEQ8%KcGypM6wU45z^>BPsc=t^o1q4bbNcWp zFGKF2-$l71CKkOuG|-U4J@EW_;zS`1+m4oW(XIpNHLb_EuS0mv&qP`{`wInL45P+Q znd0uK860Au5a&C1}!SUz6``Ta185YsjqR0Gc z=so;)QOSq8Zbq%>!E&#OflBjX8?nuD6JfvlX1?Rv5mT?fryq-7%Q7kpdGwriiFAhiHJ;}Gz6>caV&F~@T z(lfZf_iB9k_X`E5S<>4db4h1WPmJ4ti>7z*!Fi4LtdV1Zd)rDGFSv@o4aVZ_3_q+4 z?#OmG`g8X0-ViDJVZHSQ+|m62#%+zGm4{9$dLCO#t`9bp%$%9U3tZ0g*a73%Mw!FA zXSKlRa3uQ^;dmuf13i2Oh(;dkC~22BFLTc2M@>%L^-*6AKcFt)Mh)Tm`bgaUAsGG+ zUd{f?3VCMNIK2PwE;bo@;n{=()PHRY_}=i~|F#D6vZ=Dp{{#g(#7#Jxi~M4U)5UTTzBG-PP%f_#sJ0@VN`bW0!?&5 
zn4f5%w7eNAu3}<)|J~TF=NcOKHv?3b=0iZja;)f916nR?cyO0eJmNS2N45#1byND_ zq@$kfU!l)`6SiW%ihq1wZ8bt=Pq;ibgtym>rQ2Nw^3*G6p@u>bQ?3_p56 zFw8DO&2Cp@caC)9qTWKFz9uM=BzSDS#~ZA zU$+aJH%Q#8T}gE9?mAIukoLaj+$B2|eG1L}Cw#v1TlA?4p;K;0FlbMuxZ1N`w)HU6 z#66YNvRI_Dcze;Q^FKJ;&$5p<{w$&jK^?mg}1mV-5MJ{UW2c}v&HSxLr}ikR_^{U7rW{|6Mn`S zqip4VVfUV~@IcDNy!rG&{2uJh+t+Q!>;Yd<%ecHIhS&XltBuTe8@5}r?sd1uv>>pPg zjeW3~!*b41!TVBJyXS(eY^*hXj(?&Uf8{icm-4|Kx6X%zga#P8sUPf_)2gUxJi$}! z-b08_EIql~o6Aln3ezS(f|}yt6q%e33*xOYx&I-y(8z`k^^1hS=zTE%-Z7zC?<2LW zI!`aHvY=}AY;y4!Ocu6};c%@J#T6eVhvX{gI42DHtm;7j<)7uaeI zfyTxU)TT6-2J~9b?jfr=?a+Q$^1M-;ve8JGyXvl}{$3j`kG~hD$LWe!|2bgv(^WXY z;D*$%{zsP<{1Sy2OGvxdnYsTB3Ln&->-P^Nr;ic*OY4@<;l*RPYxM{6eJ_g}Z|}ye zls4QLoj{8}I!U_|tKq-&N3_hc1OK)2V+R*?e*U;wke&V^{*}BvwNp9p_rnnSwUWf) zoZCfnY+1@=eP_mjvFZSZ9GWBzPIE1 zJIY{MQZVUTkfFB|IVe!0Vnw$O@aI;cKyl;-RktKrd4IN2axiP~5f%-sEle!dEXF0~^lZopx&Fi?{t*En+T z-x;B* z*Kewy@|=RRCCztP7GG66PHX1@&z#vFhJ+oVTT_oIF3okO!WVh`;8Bd|xZVhRZHlML zVH1G;9)M0vF!qgz!nmEgFefq$#h<&_^iMEtaIa!Zbt72YGhAXG&Bssus<4m#adh53 zKp3Lxj`({B_bwdGR(2=A&(4%%!fU}ZIv#z$?BayDKt5J2V~xeCl+J3>GjbEgCEOQA zY?_Sif+Y=9r5Z*&jDwJs@5HEh1HA8a7{XF^mz3Q*LB1uYph6SKfL?6lVZT`L^}HTB9ACv zihrvj=#FC=XO>DF%5@hwwj@eSFB_vg)@_OSqj4PWnzRqs=-lO}o$JNJg9dSH_+@w) zw+v<^T;n&{BYEeRzP!BLnG;f_U1RM?RDJqVSUJBtjvjObo^>)NP*6ZyG1z%5FcwqaucLcB)Dln0mT0FF?xp%*I=DmS9$3 zld#!Wm#x%JL7+vF?29GQ_z|zE#5{oqJ*=nLxHz18#er+>I?@=u_Cn?|D>`M72Ti}$ zij6n*@%x-Ta9+xV9JP-{t(kuv#AWYk`13}*-Ns)zVCD?k5#vsaeWYzI(d=URVgun| zup3u}=*f50Eid^K;?M1+?U{(a6KKhUt71dtY4|U-EC1DBNp1i7(~|!C$fULg4F2@O zOA`#_uUd^U_wgNEjQZ?gP^hSWppCm%E{D&tI#^o12tra#`LNXc@XQAf_L>N*X9jt35b zz1cl@Slc2VInoU-%#@3sS29s+t(tPdggksaIRY-enJDgbpGV_Qgkt-|wLG`lxFq#& z2`n9Tj_$N(La*F5Fzvpc{Nj!FXw_#8RvAB$JWRwXE9c7g+n8f&kT1O%rN&l?(k7tG zEyXgSt~I-nXJ+F7soV4uQMp6udfD`UuY=`d6sy@2!I z9_BZb4@2c4P1FqSDn>PT=DmJ_)aW10vv0+dSYVz0AtO{d>IOQpG3OoA?v99jOv4w9@JM;zRV{QVh)1nSnPuu7RJA=gS@% z^c8hehtX-T_2TtY)|}V38H(bJ&`vg+cBbplo$=cIPhEx0w@rZ9rkAAhxn68zxLYy9 z;RYBxs?c-K??OZm6&k!`Ae9_7#VcQSNdEE{xSygA2`&=na9X~&z1|(n=M{0c3}d`# 
z)`n;Fv&1~@LE_0Xolv|U%MZ5#rG_NKqm-qvTG|IR{_0C@);_@Uq()LWSmLPh<3Y1+ zFEVzLvZdA<-0e{fnQwbVKR+bO{5?{IWr_cUoU?(npwb@iO1-xsrv0%9PbyX}*2jKz z^%U@HA&#jFL%loC1*2L`xUcRAo$Nbd?Un8rsx9NgAJ5{)5)(cZzKFMWTSZsl3`i#| zY;ee;=clyf8*K?!-Dt$+^{L`7YaNV;vWC>gZ1JC+1ssSy0OFo@c;0Opdd;wu^;GI$ z)9Cgb@aGCIZ;6EnlOS{(cbn7|J;+XWg>s&Sz~5V=&|*Xm{gie&tuAMR&x~2nGO>UP zHmFj}n^G#Q^n&$m&%k?YeR0cubuPUC5cAsvBX@+s_FQ9{b?A<`c3f|qvq)m1-dK*Y z7j5OI8&081ff-bMcg00@xtw)%Iv#JEEzET=QoefXgUi1(L)X|4-1B28C$y;&d)ml_ zeVsBm^`wcI=4+0o9pBSwZyT}diqwB=E*0w>yYkjL4~NS&*T{NEKlwuc@eINOJX#P1 zek&s{L{5z7bqP;IpQV00{Mc17GRFr4`iJqiAK}owo-yEHJRk6Wg|eVCuw#Fq=;We? zAGV~iwaPsCxyR#_+x1h)BUa!cW6}vSMuJ7f8yp{$$^UvyQhE;d$6@Y$l*gk?>0!n} z`T*C3g#$(EKe7eh=D6Uas>$?e;s@$KLLIMMm^JvsGG!uYx?m3 zPW7}W?>~3xWYu0U*jx`2&96~-X%c3Q8L6DU?+-+-bY+9V`vv9eV>r0RAN7iN!#bOx zVvTOJP&Ypt|2t4bar5)}P~QTyTDX`CeWG!V)*aY1v^)0qNfXvyWPUC&07l;Whf&89 z`10rX5)V=G#vgmIs_PYW4vFT4`3~s#<}-rPPHd^b1H0w*1LvrnD@{dna%wFZr z=bbAdRjVD$bNh}q!@l4%gN2w;qNDt<;WY=v?!^NmUrE*8eAal`j-!GyC?mrgi(XH_ zH~9x~6|5n{`f|mzdmrHa#HIKmF^$(YOyu#udhrVRJn6Zg3Wc&g5F{}*CJjqL3ok=V z_8h@~I{R?Vw-EN)q$ywRCgnBrGI>=8f(!b4**b)ab|6xBEhNX$RC%@kCjg z2Iq8Kj-dgIWsNcMs8eo;7pzs;*Yr48%%8;(E~lXC{cVsfh{AZsA2jJ;E_C_&lU`L9 zq1zuld2cD({J^2TvigMuxjMhWnA}Pm^xtpnqqjd)rALsHp72jZaCMg4pwXo zfZY?XQRs68f4mhY^$G(qjOMUe;ccAz>pq`;^_fZwuF;gDp6paN9!Dk&S6#(V_^-EJb4fB(_-yocY`|bz0@T z6ueslxicoC>X8Iq+HwWv8`<&A8*==5`5oSxt%I81Hc6cN-coKf9&Kms=7Gz4uzZi7 zC>AHc$rsr?>BU3#$;#sCDz&6*{7;}pD;g6x74QDb^QFuS7w*E+9muyoJYlV^HmIsr5B%{P+)IxZ-lW^ocWZrikGmj_x*<^4X&Yfp zb^~nkxJ~PHltRCW+eOzq*o?@{WQwHO6;V5gAJ55;ahk`ueO+7G9B;# zw52iCCY%{(#(4vmQi;wUkj3w%Wf$l1>#;3WuW}f`>yJ{F~DS)VlRVJ(FTu<896NWVfhjVkCU2@}&8|2QL z{Je0AO*bANHkUsa7NX(7e=@()KKO9?Xe!?z?abZ|gGbc^`QOJxXi7gW?U*Er-cM?U zlBOWMP;!a7EGQPXR;tR~*0}KCr1$JGlIddyRpselNpN*qs^HN(36?Ba2IHKHq`plR z#dlZ$2M(U5YB}(u7#Gr;tW@~zjHL;tQD8Udm#}Z@6e^ieDl|OlB7BH?Lq2c)N?sp1 zASUSigGoJoN$*P<>)qN%&IPt`)^9!MFHQ%G8Lgs4K_q;tRLAkYC&^Sb0{mX&!<*qR zsdbw|VtUWx)a%t$Xuc0NI>mDR`XRXX%x4G~xnCIm>$JrEJw^^CAU3w6?Xe+ 
zvgOAaFex}1KE@Tm#_ZvAw!azw{$s?IW#y!2U@5FB6JX-R*?eu+HujnS5MyI^fXN_l zys&%%tCtRD_e^tY7;eo?C1WUE;zV}*k}KptF`}Tf!=UxH(c$It{gALK5t3Km1(k83 z^tkZ_EbK5)`7h-c9(C%%EtSJCDXSHi=<8`$8X8dndaXloyMq}Xau0!M?O^>Bz{^zd3Nyz{bC{K)-CjiDh_8@7yh$qvG@%puf%oCjByIH2>K2juD-gFgqJ zqyYtsXuGh1&CH*ZkDEPDTht3H)7#PO;w#*DLCGM*!G$T#gzr=RC~ zW6F4UcxD@c{qKg0+sBx~&Qpbu<=up%B-X{-lt8wx{eXGrzr!z`PMmfx48Q30#5Yru zY0u=&h?8^iW7p5{_t6TBd~X4%Jp|Ai?g3u*pK$Pu^?af&L5G!9w6QT4uDPY~wMz*! zZCI($qjE0pd)h{MFfx+|uc?O4ON`{bI**ngG~LDvdN1V<1}+$**#KAK>)3ui|aV`*NNv zJ=4oO;W(ud#`=DfyrT$?_qP)wZ1&=mU&EA3if6Ivw?Aa{B8)UTyTJ4ueO&$IINkNO z<&WZWT=Qf!f45vlo97hbmYH+#Z%Yz*ZUjE^{IfX7YbEyowSr;|kVB5Bqgk4?$(QFL zyx4sd_jwqIm&_gu2WKC`dp_wfAvc=l{JRQfDSyHGaWHA9C6mg5>2lMQqiB6|8-EEO zEqQn$IBReEm61^txX?O5JSv$ z<;jlfsRQ8S%wD`bx-08y_rQC`yYaL2aj3au z!B(9l@BfW0J}j{3r1gWa@l_=E`R#%}{UnyY{s4ZQ{SEzIYA7wz9I(Z4JKJpk$JzI1 zkwy7s!NGhf&Y5~pe3zMo$v;=%xePOMSZfGzs4i;m%fi-(DDgs;4nA#qg^LILfM=I- z(O>H8-o9aj;-Y-$@~MhUBD%BdmNe|^wgjh1k=o1o3JBJCP1|qAK~7=_r;ne8VXux; z&5ltJkXZ|@_7jpYK+Wq`nEN`ETu!pMq{&s0ulaya+%tY|Ofhk3AP!2pz+V~)VV6r21nz6(-sK`|<<&|)+)TV{;7h*JeS7Q7 zj(Bg~Ha@LWN~ff=#T!Eva%+EI=(QpWMo8JraJdJ6__+w}VrSBSI~=+11wZsRoq$XB zri!ZigZXPv9~kSphpmJT=v2H9-nBT3FP5K#@q8_Kj^2oa!=2 zka8Q=F(YWWZyFtu7{M-cwJ^lOoEtl*ajfxJ`lt+d=;-zYKD%xQyYPu%TV}>t9V{r} zOENmTXv^37o)taqq^-5fE$Hf0EdE$0z;u5#RGIQr+;-VdF!p{dB$>35V}~t*qWY|G zI(`5K_npM&=cmGhkRDJu{DPSCJCHOU+MsRjQ*^$jg8nU&xIVN7pFRvAyQYq~LU~s# z9{86|hE1h<=V4Hnrj2QFTi{*uL|Xq%lizG!53tJ)I^A5t6T6&b*V9Yb(%BLR*7s%k zK_&D%uL52tTtUNZ1vOP%71l4;#Zh}4c&EuGNw3%D;aP?_-_D4g&%dG_zU?tW*OVNa zA7J;}Z|LmrNw}&w9q$?s;MHd@pi#zhD0omJm=`WV<;ja;Sc)k`mRs`u1(&gF{&<=W ze!S{PkgPN_9+y_12hn;h7qoHX{fDMgZs{S~^y9s7CEymHKPrd88SR8VdyI(#6G?xu z7N&icaq;`!u*lF58$ZR;{I&rQ7PyM%xu=m^%R^H9DpEvC+qx_FX(-)7JM-1ygK$Wm zAK&SInLKmz#qtMddEK2e)UNA#pb9P2y|fsnY)VjEPf3Q?!(RwRW&jE2b;x(ChSIF` zEu5_WKZ?%8ovOBr!%?P6Bq1qLcrz4{!r9L{O%f^^D9uGlnrJ3dri@W2q=--oDTTAw zItV3c(43)B^Q1{def#?dT&{EN{p@G0^;`G-5`A8DayJ7@@!rRIg2%Uw{r)$a9k<*F z<%cxb?x=E@>-89SRGg#kf2PvIdNJ(QE*)m#ex3c9SS3=~wuOHbAY@OkW^#E~g~C 
zlT>IPAmYuQ1>oKFj?l%gVEYm?V3GVqT;TDFS^Qm&FQ@8PK)gA@o=r^Q#yXs&P=ym? z#=)z=nb>SJm%d~r5M>EI-w6?TVWK8$nkwc}C(ALT#RJ&}x$Tl#(>1jIV>B6Q?U#JB z{lcyPTZe{2UFdkY2)(w4v8(F8+4(3}QQ|3MXxOg9n>vaZnxDkTJ^k1KE|J+96|!Z_ zigz^1U~|svP)W@;oHckLDd~kX?bI(Qx8W<+Ck1g+!y>4)eIoc=y8ygfCo^;J;?GLL z@i{|!^50>4kQyhcN*%++3R#NK8{06#<|Ed5WHQcVJUcZuoCdg!qM=Q<(SBVAQ(QKW z)a4>X57q_2mEU=MQP6o*E%^lgZY}Ic;7E8=IEJoG=)ssZg0r&XF;(2im#jJw$bNj% zg#H~1VfbVXf-6wPB}Wiei6p*V!;=S7TtfMjKwOQl#f~b*xCklKyxN z<9%C)(ZryUuxP&v91^_czeX8zoqpHIdZ;z2sZOV<0mtY}e^W48D8>8Q4dl^qh0b>S zuv2o%pshBDGEb$U=i5c>6E{R^|KdOK*B6T1@^O7&+9QM;4f}a*(~sEw)&M5`UBGT0 zJPC3AjF@q2uyB6Vqu+peu&X7BpXC_>_HGG~Vpv2&XTCS>TG?Xv#r?~;)Aa}1n?(6O^9sOSx*K$uyT0j3Kdsit!BfYz1(0zs(T)T!{!h&9| znlG)Wf5;SK)!=GL0@}a5&+P?GI_}3{+?L1e)9j;o(yWm-K5>BOS=AJO%@0N{?+?L) zw}7bG5(gw`(@0^b=Zr}eJP9~Ml0}bb_BnwA=PnC>n=PS3y@`5?p23G)5zN{VOLk^O zcr|z#jYypa=cYZQ%{d|D&>#y=|E$T%Ium`Ln1bCz3#PfDn69qvCq8s_7khVgJ^LXo z#~BK8=vX-zdx!U@znaF3sh^_*J9s$V)+$MRtPXlj8)(?>7+RSrj|09vV*3hQNPD`b zc%s)9w&~m_W|}b&=XExMeugfKdZz>rZw!*&?|%vSp4~LO-UR+-#gS&44ag;Xk`P@XczV_-1Ecv17$# zm|1_0OrPGv`d#D6L!3^1y&o`-;1ITCUK;8r`eO&P5rf4i`X?~PT1x}LE^{D#{B8l; zJ2U9Yvd=7ST?uaCvY3779khF(#7<}_h_}{F61mM7LDoqFsYa)Z%NVE5KU$aJIrQMw^(ypG@jHK1@h)GIDg!&DHgxLBBg}cBO8>$QiCu{VrG{}(m^hMDr>%zU zGI=)fi4#psd{2YV93yA*2^5!o8!z7&1Mi+)=3N@=n4D67c6RbPR--I9%XSvCn=u=q z?E7H6vEvMQ)r5lLEDLyTa+KHgs=%}t5fC-zD8vZ&km2Bh&IWQ|W*7^O%E^LPdm-gE zrNN=+1Ep;V^KtViW9mD6E=7EPQ>D7>1RI`aKnHi;z{zLQ$jU5?_1xS{_Q@$s?2ske zS#*HiyZ!`W-fHT)qzh|2J6Mdam*57u3Gu$w6utKe^C(#Zlikz=|JZFD{oYz&{-5CA zy6u5Y-$E$SeIhg#Ex?pT3b1ut2sB%PXVJ`JyN|s2V4>p$)zpEPPz62d;SX9LZOGZ#r2Hn!;v1e zU-5z|C3>(mvLRG`zCQ#t$V-wo+p<3%izrUm|Jbp*50{&8mnAPfj-Q5Zql6@Vu)C(o z{oQ&G2Y+p0r#7~-L-X62=AcsCxhw@QB+a4Nbs;R>{x?%;m*vCeePkzIxWnZIMNDnU zbi6S=6&#e`Vv@`|7Bx44zoS@!u?^So>|saI^*;@$vZyxuC4(E4&>)mtL>6 z!6Anj?^kXG`!$2$lUXembWG#(W?pB*6rwn@i&JP>eHC7w@r_?U#DcXHD!{L~Wi-wt zk$;>Pz`7HDbJ8(jt9w*IG==TE*~ni4u0a*P@>uQQVnA z9~`bZ6lBMY!=yvb{8#6Vc(-{IW}ldbUOH0#bigk*=|&%F+qjH;Jr;p+jx8lEQiSLG 
z!*J;f8CbY)3!FXwk9#cFmqMbq^3Qzw3;$0DccXMP$#;yV(IadqWYjArdzFK4&#$1* zW}$Dl<0CVudX34h8!2E+INDEmk!gi9N2*#Q^+bW=GVym#M+qW+!WcP9dG1+J5Ag$cMlOS$i1i z_W^V@WyySJ9QGy!vz{>txMzlxy}h4?x*L6=x8D$+t-g+@UQXxh_o+Zdurb-4*2g77 z`og*`FPOs-Wk_G}my1~YR-!n>h#869D1CV(9f{e`)EXy2;jVTzwq`p;ubB=rSv5@0 zIwf(m^P^Kc+tJX0fIYM$jjc`W>oFeOh26>P)Axdfo~!6tzZCEr?hfWf9lXw-Zf>IC zVcyj=PH;}-v$(Ap(4v$_>-HOx^_J!2r|-;W`$;&_U2Wmq@W;{K2bo|AWs}#X3mG2; z8dz&gOAm!Z@n2QVi&CbiTFI_f##77NQcRc^AaEQ< ziI0u;qjxRuMRP;cq50iae)*f9sD8?h9j}{BY_k#!EjYmGyQ_iTsUXf`>HxU=$dbE% zH-vkz@jI5BX(9K02GZ&uXZeatno=e{BzRw9*=o;v=sl&v`UD12!OcIcw9|++n7eQ# zqB?H7Ukm4%l1n<-&2T-$l$F&Gz2qLS$hpFO$z>QxQqxh zre(Ewe76HU(7g(mW^AHL@4IkZdkj4n+LH>Z>Fkl5QKe$7Hr#0($tG)0hkBd=+K)~G zGZ6M)l_RLH`cd!-SwK;J#!1yp65!9t99l7Y8+Hq>6W57jv1WcW%I3>~)rjL@7x)Qt z#ty_r?_j=ZTPiEF2t~W%KX`hDI*5e+kK%++!0#O@-Y_hNU*sK4O`l@;;FqdW+r3d_ zX1S3)`#gX-MK@#6;9mZW`$&nxs(xfU@-TBzEal^6zhhHt6uHgK1-C(QRGlNP7DU6`@$T_X*6~H$m0*@Sc`vc%ws2tKR{HoGMtncP{8?jOtot& z{irvkelu_3hkiGhZR0@!)R01pYchGOuC64xW>p00vDuF z7imd*zn0;eO|Eo%;3jZ75d|B!cCoS67MLnzBP-gz;yd9^xAj0E=@wRl=`Stvth>dP z>lo963sx-nv?KkC_lBloUlQ@Ps3_#d7hcMt8;g(Qg@n&6_^6p!!FW0>xUNP;ccN&3 zjiMB?z3Ju?j*QnxDR56Yf9~mM@*3I2#^nlpQLR-V?Tv%h8L9NmN$6#3%qB_22$=rQ zn2L7$z@&L@*l)kQ;FlU9WOHA#^btM0pR}4?_;*>TrhGHMjSC{jp!@lNCY5+f+cq$Oo3^Taw+-4FWqviSzXw#17U(@)m+`?!K;s zoDcg7^+tc>{DUw${(|79?N27pHnGm-Tj*(tK2sbJM9;4$!}4|U!a355Bi?vH!Ri@! 
z{V?Lx>~W&rrw2Hhe^1$zR-RQAT;Y3)LTTS#JF>Gcg;TfFHD!MEq0 z$YQ>aXTJoe?veL9q41RtEm>xQ1<|{}KzSK|USdwcxz^CDdl^QJn@ghoGU5Q+?Ud*d zMm~9=OzEK*cjelFt5pRHQ8r=&pZS2RFgMSuzax1(>$!nhl?oJO7z$k&#$ zq@82vCL2%ox%F7-RE?{>Q_)9H1!g+$<>VFSvH5>$al23}%RB#No2)`H)ngi5XuHRH z8e8y@yU>5*c8V-yt=ZG;{kUb8A-TURgrnQcNc_wQhP`aV#{UdK-dGtI@7+sFwY5nu zznn8#`i37exrGm!V+m$wmNM&&8T^@f(PSFi#R61hFt4?Tbq;dI%rsZ(9cRJq{Thvx z@fP@Ntvy?jvO`o&{!BkbpOpU{V>>()sO?5K6Fo2$|L!=*E-86|sJ58%Ztu_i(C+2l zOdmkyBTisXv^Ra5;}6YG2a0v>=|DjL=k)GFD>FWK5yuOWv9ZnZtWxh0O77cHcKE)WqWPh=%%k3m`!P!UxN7GV8Oq)%rDalKZ&i9;o})@fyd>Uq)e|=VOUp z^H$!ktO-}5m|eW*M+psMaEDwj8$4jNkSjZl-LqwIK=}mmKpRCm+*tr;V-Dc0&u2xu zmL~A_p=a6EE2enQ_cb=Q-(#iq=W%h}e>5jBo*Q&w9^PD2flD`g(|x^k*0jfylYJ)`cJu`m3iG)v*7B41kY}0qyMyp^wG?_`VkFIFoQI?)6+dY4RMxDS|RS%y~;X{4}`w*^_-010&;t?0X?<4 zSe0}SUz%%81Km_XabLJZ5?{u$&7xqEko9^e-a|D*;skbKIR7d}lV*4-LH5j({6mxR znENS#GS!{9y4Qp0Q{;V?>L(&M#~P-XH-w#(|II{!Qgrc4gehNZIO7S1)D>a?B}&2! z)axhKOirXVD}UM=ttiFyMx)`F&PO<8q{$oImW3sbi^0EFhqH<+hGV^6xN)W)s3Zns zuDcdRO*_W23YFn^wm#%uJ%>s{hU&=u3i3J=0gi5RAYD?!BAt|>$4Zy)@f|KLYtjVG zb3IUIJCTka428z<>-efX7A9@~$5@vK@Jsz!)mfwfpIg+dUqA=SUc&SYW1_XPe5S)p zc<}5T`X}qd+L+^bG2k}qs$YYGordB?p3JiI2l^EDfs~nh$zpygm*ZZJ=>?WFedSgR z&-sWwbNYbe$XUFOh7G(*nMnQKjU(Cca0nW^8fW<*LO=abN}en)ZtaSLbZY|&p8Abj zs}zqC<0JSurjBmEQv{FU|^n&C}V; z`tw>k(P+(rw;v#-fWtKY{uDZud=>6KJ4&wHSa>x`0cMA#vKzr~V6xmE$nL0t!#^G2 z*;+05bTJ-hAM(b$tGOf{cw6vzOr{mFLC{}64*z_Z2>(iUvbR>FSaX9st`vL~6)G9D zz-nj2{I$BqE&+9=(F_%v@=q~8HVRrmAoSBkX=GGe+R?Zf{D=nL>BF@ zXs`(XW-{115mmmKv74#Jn3rDyA2!c}%?Dd)z*8@~h-CxlaDq0RN;pQ--c=)uMK;H< z7?;~b;{vfU4sh3%jD5%os%6(K6Jxbhr{3fg0-YBu})67o2 z8jWppM^IUK1gp3nD`aUzqE|osp{g*Ft`%0oh+UKT)*>V6@X}6JwNsulIwrBPf!(Z2 zEs|VaGWm<&oUufh6$J}!)_*Q)R1i0UJ6rvU#hGw0aHA<5y8H#l82!g4)vsk4J>Rfi z>k;g}p9=Y_*HhwpU9N0ZJ_{bMjJ7I<;Be(G{7{=rJqpQme_0XxKK?K>?+j$&{aqZl^g>k-+`# z_knS_V;N^Lnzu`S%M^4%QGbEJmX@zzf7e98&#qZa?fNJD+B_7#Pp!h5W40L7)0Y~= zQP`)x+Ah;GkP=s>;Hht;(YPlBGNxw`DwnW$!ELlKO@WnN8VmZ3Cvd8L8bsW$0msZG zycAp9Nr7=s2JHZ#7h=l0YWHSEG 
z3-ufa;($Jt)UUFZjo(?#t}j<-&xP4r?T>g~CcqB7UJI;|gvHQdHIKP`-47~OmEgR6 zE!v{q$2T~aV1<-j^#&bUWnq%Z6q&HKuNp4p=N>_T)qI+v99 z8Ixu80lwI?ocg)wgYke|bO==fUN{py#_z`AZwf#w_!>TW8AsCvcG5x_F*yB-VWCh2 zjaB$hu-9@MC%v zF0shLr|0MJ2{RwSF@@V~5Ld)>H6G!fK}Asba0tv@pGe<^>}NBVzvITb^kEzI1csI{ zBhC^X!6g@k9&hhCq!GZ5w-2$)u_?f=P=;XO<5lTTqX?nzdH$ECiodWtQsD0bw{ikEQC`>s-R;3~ZJ%pW|i_owu; zCgd#{!8Fugp>>TnSbj5Q?v)#9v-1x&X=^prx&2|S%SNKoog*wRXgKt(DPc{+Z*m_c zmf&kBeD@NUqHR_;Tevx#9u?n3ExApc@wae1zoMLFA3V$-tt-T6{gL3*?X=ydAZA5RfUP^e zG2@4GQKvV7zBQCnqQ(SV`S&lj+_uL1$vRMeE}6|pHU`t>TJY#uooI*AXq;I)iOsjT z$_`&=tkFN8yBe|rPF#0_uX9_tiViV3w5Y;=LDM0$_8Q2$zC)EWwd{bI5AHr3$A5Re zB{}&E`P0D-cz>}DciAmNa6a^<*Xnh6+{_ZH-f}cE<|zxAd6_S*Y+`NA9ccYH3d4tn zFr!P&{NbU0c+P1L^ZD(MeWJE8Yqil_*5EL{A=jG?s#%1Wmz~73?<;71yek9*aHJxZ zVSe?Qtmt<%DgN0(pAHq!5~m#0_gzFeKQ^&HyHeSVkuu_)n8tnuJz#fq_Hjl>$MdFF zzhU8}S@dB2e99B{GhL6ZhCYca9fo*@;$?>z}^h_cN(XaYh1eS5o7=^<_c* z%w7yWHkmC?8V=g=T3GWza4_m@N=>%Zd{u`8Qh7mG)~IZDfU?K>!eaXz?$}QqUZg(*fAqvKkHPO~cwH$J>rVx>lOpK1 z_YY2ZJROH!jHX3DelUZ{vrtm5E1hiOO8U3nFq?uobXoli?KvGvg|&zAboe)>?@R3tnz8*M=vnuif;^FisLDRws6LglPC{DGJ#zGh4xn(8gg1|Qra zDEKsQvE&CgPBjVA_Bn&>!5DLB4J)T zhzeHtg4u=`*t^=5=4O8u82aX7iBS=L*cD0+Hhz?+oXRb`UWX%*ga5OGjJ)Ua4PqVU zo{|P<4c+l=`Zn^sok&*C^@wenELm6A$o!Ta;|4iJb9K{)(=MAhlv-8bg>yGC_|;IF zW@=C8Pvnw#kOl>0X5s(;c4uQPAHOt|&z4^Zean@}yL=3cSYQdOugF90f~BChOz<@} z+j9-ihSGnUnrx`?w+z?Q>})h z$&#;BO{2hyNEW7TO`SW#c$vjTbad}N+-H#qcaEm8_7^^6 zuCyBqisIPpbtAdI5-FA)KLP@0n7kf7U&IFwt}x6Gz-5hw*iGE%W&34bCH_H239hojDMP(#s+Nf=F69Aqg8?-jHz|S z>o!Tewcjo@`khJ*Zk5rt zpg3qyR~7%C)3ftYE(;nVuvF{H&^uHMT{R`#cX!yxoz=sQL+^99-bz4xWG7wp z`OHT1-`UNX&v@WMPqk6$h>lxm9-(G3C4~Q&9eeCn{1TPXmG}L3X`JTWJ!x8DEA9O2XFm zPL^UlALR1x!lMjlFiQ(&gUo~Rm4P#-6;#dZpF1zPU2zJeWh13#CV8L`RZLAXDOkHD zkqvUX!+Kjy=>6?&f=8$uf9YIeYh^Yuk6sbj^pk<)hr)ZQrz18fb`}qEE}-Ca54x{w z!CYoWQo7eMc<0|FF#GHT55{zqSyf?YV;SZ$eq>tQK7DsYVMeOYqj* zQIMxtPI?R8=#n z_v$u&X7X-MON-G|w^sH=8UwXU$H3Bd1NQE}gUnf!&(?Ibvxn9{Y3BY-_@Z|JEKNqN z^$?gZGony?{2w-{;XF<~upG4>zY&hW-pYTEFR~+NuZX;6-V(h|a3J|dXW;LVua(kC 
z_gHi26Xx>Hnx!qWfJ!3^sA!15_I`ut?GOVl>sUWKf2#>NYg;wUh~PNI7BPw!c+y`X z$KkhiJJ+ef;Tc^|^2yqd^OEO4cTHcIlsXW4()-b=pKGz>$utZ-Z$NgfMVM7RRG0~C z@OLveR8|fsIXfUg>gU+M7BVY?wgzy*5(Hl3dWd{)C>k^bwzb zX2~6#GYmsJUUNN{10imU9~{5`7judOSnR+GdURq5Tk-u9r+D-aXOjJhyW0Jo8&W4? znWMKbe3`|mi=V?a;~C`l+Jst^5At^$$AZ_eVZdxVaK|VMu6E@}3XgvZSFNgW@yi?< z^|7qdsdp=gRJ)jJ^KhL1sg-wpoK8hY%T?-dQh0aZwt-6y-+!e>kCWYg( z4L&rcbrh3XyO(Db|Di+eNw~kb44!A+;0;pe(k%TrmbGaD2+mdfuQD7WvIfA9{^peb zz=8xK9t^U-4{9wXxO*J(BO3rLivf}<>AX$ro379&~EZxsCcZ3E%o)Zq5CD85jegFznQ zSgl-xfz2BzzF!VD`p$>HXXnEZg%Gkh#j{ZR3R3!}g%6&8Vca?mvYV1aY76h#)~=~% zw$B$)Vo5#8_u4>BXgk~dV3jRp~l^E!yXELfG}QV;*M(-us<7D|8opY&Sc z0Z>s*q5D@#`MkE*P&slx@j`yZapxj#LTw7Yyfy@iL=L#IH-|f0=FfUN(piFAF2q#z zWzHtI;oW&{mgAocUs8(zE2`L^PhYWS?G~7}JQDtsSiq?MxA2}>0lYTf2cN>)S@-;D zbhj#!x(o7Y>u;&xd9T4U<!W?5Be7U@sQ@MVr~~2RoI_LVDVi1>*fXaKB>Q;=fBJSa z$v@BHkIJ5c;FS6FDX}cT-xH+%yrd##zH*FpWu?I#><0lX*^^FXW}4D z5m{XIDhSj|q~(vQAmX%~^wkJ0nx{U1!pALQiJu>`F-_5&O>r*%9^1-}?VAL~;fhfD z?>E0Cv4nYy_`?M!gt2$&R?vU%eO9ZpfLhdlF#G4VoSln-kVlz}&Gw^E?UE}iFui~u zmrUWmXsqR`-U>6T@4E11^8_f;mlHBGqXllBEhQvov+b5kXt^xGs>8wzC;ljvJ{U|| z3kTw}DcP{z^a3-5Y{^IWBTOT2C!BYV;5}SNz^+Jj@uk)T(x2W4pLU)Qco62Kswj9y zmfCQYM;ay1H5zdDnn>YHlYxOUO}KVq5+Y)S(zZDL&&|g_*DBhyo1j&$ETe8f0FUPlGpbG_Cszm<=6HCF>qiy!(Ebu44sB zvjjJqtl;8(_7Nv;T8dup3iP^HW*7!g2=cR1v-W}BU zfIYnYm4?a9&UDOJh7UYF8oPZW*yMjhAbPT^X!P4(IC{QDDQUY9i~3Hhsv+C z!1RVQ<+zMwc?u~M>y^(=)bxRV%hxc4ImHw)C6hEiYQeI*kJvYTBjzg1h&C`aD2=Vf zbUj;ML#~;7^?v>cr__=Igga`|EL zajcksz(q&BVV6`0chA^DNk3~?po>5FyZC^r=P!1nz?mXjJOG7VrKm%~9&Ir38DBC$ z(LV-4^Q$1|dpI9fGL`MTnN3O`qw${R255c#jazgg02#51!!B#U=disUbMy*hEU1 zdzsx{-oWL3JcG-&jAE6!1JP;lO|0#{&o&Rc#KuRb!<}jjtb>se>*4+)DHW0;GfL9nHLvQ?+v%s+vGM`5V#f?VCaE9O_7d)424$%8 z)fZz5h5d}N=PFb@2Z(FN?!=BUn(3OYO$WZcjDEe=-V$He-7^p zskhRgXyFMOW8(+E{fx+Po*G>q0;G7}57q6>@O71tesf-ufTJ)-e%eH%oWnR9*99m9L_k3S&h~;=%t*^uaR-VTG9S#ul z)tI6hOPRM#0y~&9k{f8Xk`H#S8fnWV6=--mjM<73~L@Fs2L(HPv`; zITfrDSU3e2BFQv<1)cQYA~+J#IfW~hbm{sRd@RUR9gitNfy*wk(yRja4xWDROedWs 
zsmyw@JLEkXO$z05Fl(?T&Q6(0wcfdcxLu3$Be-9bmUYY80xPgIeu3iUp|#b7s~=Zpke@je9~|7=n_j;y0}7a(M8lQ z84GPJ2gfvvLEcImJOuueMr}VjIx&g2S@eYs%@Yo3CuO$&SEp#>yr~fVK*}<1t*52i z+i;Gf9_)7rWAl`sV6{k=AMj!`Z7_4gpCH50QP_wB2u-1*AA`BTb$sF5(Y z2|+k_%xMMuU{XCJQdK@jOXundejyt$9NNQP%|6czi&ydsZE7g1;J3sm zdOCY%yc1_C$3XkVKd6wFM*p59Q@{3^7yW9!OY@$D8|cwMdzg@ zlCiPbP`-eXD7BK@zD%L+zE_}+lTfc}q{6`XK<-kZDXtTCUD~HFzz0_svBw3IXkmdG zD7?Btu_rX)@LLWv)@v}uZ^xl1cRwabxeH;3VM$XCsBS(FO){(Tw)Hzu*=d5O5(l%>c|0HLHHnt~;5hSn!-Ra6 z8e1~7FMXV537efa;J8;?MKjBHaTW94FavFC)Og$%oFZ!J;@={E&*(0`E?&Z|e=wGw zWg?hPnOpIsWicK#234)0nXRoDA@K5ZDGi>_t;Hh;zM#vW z2Z+w*^ySMM*ejrny;|n54f%q{WJx!7Zh9Jf@J!@G-pE zR)PJu58!<=>>ws8nRj~D4e^E?6$niH{`)P!XQ*FHkG4DL{|H#;-H^bQB zDTa7WwuVZKVW!qtWxgHW zj;v&7?g;#%#C~GA$|{fyolbX8*;2E^AJLHKCm?G3MDd1)!=x(f58(JyXC;QF^GWNc zI$M~K!c`9LW|#atG1>APXWtph9+YJocC>L;s`Sci6_-TqZ0FLAbo9gpnowj! zd76KivWt{-Oqy;Zn-Q4u|mk>$Hf{;`gS;nZc;iT0yP zxPAJwA*DS6Y(kH)BbEwoC6_#G)(jnxuD1Ra@!x&e-uI^ z<7ZHFLN5Jsb)wA7qcBg$g~HNY$T0mFhoT|$bpKL#bEKUen!W@!9y!JbYROUoE|M%8 z{epdatP9d7ulT6VwqhJCp~wD%A#3hY_&!m1{!R{rWyu4@(RUNz)S-N~t2B~^_W#J^ z9ZOODh##D{@nsfOUyuKO%SkTCt7`sy+413DZa19^nKOn7AVL+#!`s28j0QXy$^TtWbp-ZNay_!GeL0CLh1|^K{0g{a zwI5%_b_o3AKJ4z5v7GnT68gJJ4w8$@QTKqrm$flx#^2YXyz*G;+O3sLsS`NfH}A9An<;F+VaVrAN^RO>J@`U9RwpUORLQkUL+K2Titq6f#; z42FND_LP0^CN?SO!h)s&(%kWV#oGm*)PJ_yL1( z_Cpct2t0zbtj{y850TjYL*NsC4rRUUFzz^=j(t;FBu@-9`6TP-?B>vsxMRc&4t$)iVj!*nnD z2`;hn!^mv_jEhiM^pFwgW3Sbm>`dByvMouOjSL8f~ z40nS~!(QU1S+n6-SPbdhDc<%-5Wjj5L9VMty|cjXo@u`-`(X?y%}MFY=2j zWu1xnc)n3lG<^R-aOpb&c#l4$>(K`U7#|B-;Kw%DT^B{_oD)@@i^QZ6XUXzUGq?Zh zA<%gt3->l1pqP=Cw7p?2s(YNHKTUqjdhB zXcQ`rmgSuq2f|7jV-A*qR+{Ei4|0tU6n4=_ZaXStV+ksBpOG)pFn9U$LZw4kC-Z&MCap@D0gfloAFXcdXaDCZySDK5jLsBY7rqfxiuQSryOHu|KsR9 z{IUMtKW*DWDMTapx| zgqF5aec!*|U*OS$-sd{!dcB@cgOk*6L+nTc zlV0wNv{{MTO%TT^MyUh>lfjKU&)bq&coCR_FV{mC9$?hSMj^2H5og^BxT0lWT26@~js%bbNz4kL1w@@4+tp z0yL6r!#5Mp(ts!aa9jA2)_pmQHf~;k@#6=#kkOChY<21IKeAzI}xT@LJ2Q7euM-Zv$eOkK1B1GfEvUb=vH;A{jA1~1`UC1Et~lf6*r 
za+7^ILk_1z%7Il+7u;MfA^aOA&K~VDLf@C=@L4X6$g1CArYawUB^#eX#KS7u{v`ta zKa8Sp;eYf^VhOeBm<>*cx1j8tGvr7k*L7%_M1qD}*`A*BwBLR^nc=6yENE$^!L}oe zM;GARDX05DLt0NKHOJL3zwV`1UwjFGE3Lu{0GsnVOKXJ zZd67_LW-zOB$rL^&&IO0+eGVAGHrM1q{Gp7h&TTVDl*^c!U$C~3|GNxR;lb}&up3y zQ$hCB7t+Xt!(YVn#V(>8u922CtJJ@pE`*sZ~O>@NG4xMDN)Nbq;N~Ol?1>{ik zBE~v5k6L(&&>_Jcy8ZDC&d+6npL*5FS}jkknB7jCt^oOw%yrcSV=+zXA!P=YsZ`)) zcJ&w@89K`t@oHc>o$Zqjbjs12tk){-x@7@S4`D;ib%&x%8hob25cRRn?x*JEYjA3`IEu#}Qo#Gw)TOo+|FviyR zb<|^75i?PFA`C58r&=jJSX=Xe=-PAcX>VhA#LQxM+*N{|&+EwdL)&0d>>a^#?p*g` zN2&SQI8ta81LOA>($s1N!OhKM@wd!Lsx)^#4!ea@zvWXj-4e3B>73R$yZAHo7KxCat&NlfbQNtWWYkCjG7>IvpE_qG^rv=*21g zDAk`NHe(}BmzWR#nrqmn7=Hh$wjMoMi0{?}6@ zi24vm{%f`1HOpyJ^aZBF&5n0HO9kMO8J7=!#f%PeY|3|QaQ&azbaCH1Hn>lZbzQ!XQCeUVM@w$X z949iVK^ixRWwUqA??S1{m=3Hjq9u3(uiTFy|(q`Q-<_nh@Za-@SZmGH`OuL7Rd&OyZU^q#h zbD#OXV*@QeScH@OHEDxLHdAJqj~}~}=>9$b;O4BetI&LfFQzPp zO_l`aFH|7YUz(tFz!Ca4e=b;$pM?$EmSD+XF8v|tgw+<=v@5)sJ>|Iz#zoJ?%(gss zX@xGVES-QQedpLud!%7`Vy^A}JI+i_yd=K+n*hx>rb5l~J;Y4RmoEGkAe7m666+72 z6Lf^XBE5=vbdQW2URdOU39~EVsb?&8SPf9pq=b(pf8oQS9GJ8C08U@l#)gL+LGk(~ zX8wx^c>1;s{gtOu?~90c9{5n*BZnb*@eZ(lZiLr%eWAHyCHY5VuL)EphcX^w9CJa_ z3_tSEL+_|NYR&yD_@a14aFpxi1bU~U{L64UW!Q(-%|C>y|6Nw4_3GL*I8c#4QW*uo?k>3HuqMtKCqZ3EBqsW3 zbNhD&hr8Fq#=GC?gGWl3a&rb)ZQ?v+{7)om;u~1N9SUx%ym6_8J-nYF#!q*?&TEtd zcpI05GYsQ#foC0r_s`|OQJaMqnr_qSW;t{Ru|S#jlUS`glV4qM3|9Qv0@fELV1`L4 z%Cag#8|QR5d$=0A@(P&yXSmMMTq)sw&N0|Np$0Aqng|}z1oha5)bIRJh`2hD8h*{d z{F@xFw#}NqW_dVWt}z3~`4y6ayz>~isFi$EUWOu50krS_M2FAcaZ_j>T*^3$&t)FL zwIOX#71t7O`CEdAoPBX$U>=hvl8?#aX*jJ{jW(@SLwVmdSiZ@a{`~5AB$H16~wXVT;l>w7Y6dWwjns ztB2gZU*ZG}SdheZk-Lc|+0KXt^4a$m-DI;zGcCImO#>wr@!RNG=F;g$Xt+KI6YfT0 zrDhC@*d6-+InH-kCP^Fjfyh30h0)p$=%m9S3Dm&-3a`(jf;HcN94?=i!i5v9p@**oMXi@?yB2(Bzx7jrQvLt=chf-a!Fw9+ z&8N{zo>JYPFR4b@UI7zx7-FAI!k`K3xWCCz)_L<~db&0YnA7q&_SHm)HHxK&_kU%q z*Z-u`AH>qI73r|ssDjk2&}Fvecw*zE-(;-aX%Z`xB~}(&d4Ck%kvPp0bbp~g{riO5 z_rW%{(fAO&Dt}Fu{OiX#Ke%jCh7I@+TaXJTJHc02OPUkLz~>MNyc}VM`<&h2=GOwA 
zoy!6A+Oz~cmu|;kMJb`)KRIapG65uPYe_OUbKH_}rX!t+#7IONqWY~N(Do9Iu8;I^)5S}enT8jLG`zW5gUdq3 z;)>EAq+rCD^T;oO1xE?0PAZ2t?xOs)PoHA2&J*gpXB-ZUafY)8O2DeWfoxmu0+~CN zna1lWylU6&P*nboacQ_fdTX;0?oY*m$kW6_lck0iDoOq0AK0~y!o=qbaNbo#S~rkH zU+k3RHzx^cMoz&o&=?=`{tVME8LC>2jF7%n2Iy$-u3a4mRs)3~ON* z4e2sPz@4X}X$FUK*1FCHVzZhkIk(pcsImn=dhg-67=T=2c{1u?nnvQH_(SEID`8 z2foeUi5ihbWa|?}9PRHRvgk4*z@;Xqz{EzPDxkJLyY8-|tcsJ%h;(2HmW)zeN5~P3_cBm7TC!Bk`Q2}FR zv#?W`Kq|L+(<^%$I3BDdN!%X8xp}$!h2<~OBo&3KvO3uHvYzbb9I}f}-68P>e#E+w zkBionAeYm}!z-RKhkHIyzs5~ud3gi84sXGBcL(V6l*Mut0=&JvpSiYmA&R`a$Hu4UQolce_-rTFXCN)Jd|86C*_h*}u(W{Rb zA30NI_=ixC{pAzM_}WFkCd;9$R4ezw&rmJlZB$B@0#gqiq%BIYI#QGzUGa(R`8*pQ zFK~fL9COUq=^QTMX1C6M7y3D24!Zw5h>NeKkSB-Mk^%QV)GfNWEx-OEF zI+jp*SHQV6%HT#x3;lO3hTHWRV%naq?DHq0;Oa7-KQ&#MoUOWuF>20GR%H$X?+nyG zS%NKB+(7$3N3fZBf|#9*1cPM`AW-IdNTuOSgrOH2U3&&uT3jdY>vZg zz`DMdRQMgSr&OOGIO_;J^b4it-}F!>jz=#%*G12=iNgEx2o~-r6O4-P zhvMjY_)6r6PzUMi0{{%jMo`F0nx(|8R2DXiulnUf1;Wgplw z^Tp(BVgyY(R}F3jj~TTkLp1!`QhM%-Dk|uYAy+!0$*s|~f&yhlh&NtKN)pSdV_^*w zaJYlKjuZ#Q=~dLMu8P?h zc(V*MwnmgLv#Y0LO;W+~qbkSu{78*t4-%KlQEYf(9=-kK86C`xWyKn$F->kPmbw~% ze?%~~c%Tp8^ghzY-%Ygo?P;9i`+)qm33yD*r}-X?@;fp5VW z+$5RF3-I^RUnDYa924teM22Qwq?0#ZgNsFFIA4Fm`x!`Qd4d)Y<}lBYyPpsq@7Dh88>uy-C$vm$GZ$jZBXqdRud#<5UtH8CZ$K2`lLR&1r(`T4pHgCy((JQPeUD z>A(DQ^h0qYJt^^oj!&uPjW0xKxfMvW%R1=GBigi6X(`utNg(e&Z6pmr)5*8%FPSlW z){z3yW#o2|v@kWHAJvU=$lUqL#LIFM89OZ%it6iOopwGEcaemIIcw11tbs7WRs=Tu zHiw`8Orbr5^F7NQV%m;OsPoJb5e&Ld6zsWV2l?}5s6w6xowDXFNIZE$``j#`uV+8m zuHa8bqs}2MO&|hUC$i7jmL?i1)4*q&F*02X&v~36FH2sN$d3j1DI<$WckG5ti%jZh z8b@yis}sLgbHeUQg*Xop?i`nb`jKgH?eKiO^LGzy>XGDo)SV%FGnSLIZLYNDvO6yP zR!646Gv=IZF8vlajeL5S!F)ZOA^4=EMRe@nu@!d~)A*n`y6xyRGUi1K?H5qGRTvB* zf8VjQw3m?F;7%g(_YGaTTLz2DZqn4X9@HU~^W44NjeiwB(Z7G}DG4#beZhB_*N=}8 z75_H^J&rLyTS^Yi&nA+VJ~=_QcP!qzWDUng9U((&5}F6^p!JdysW;DwHa!+FQd6$6 zQ=P`5G}lXs;x}=3t`uhLpKijq6|%Vj^?jd*dweUmf3=CG@!H@W>g>42LOL5N*(*Jyj z7=F^HGZ#Ol?>C7-`{V>tKQD(kjQLJ~wVz-Pt+(TR0n=gUfCn?!)gf3jB@!0IsbZ_) 
zBf3BM1QhD*g0~#+c~z?$l@+mNlj_IOm0OV=R+7Pk%{MUmjTu$z&w?-)dunB-4(vrQ zCVNa6e0cmB#qQ3dT?LzAAgz~I!RII_Otd^%a9BW{G&3RH9bA=p8nn!+c?pfD+ z>P+U|?{uDiDLuv8gLAvqLGa5UtdX!~HFBeg<-f0V@fI0q%5y@rs=(#?w?JOV0BP|7 z`r|FBQ>>x-_K`4BP))IE9Jw-Q3OH-bgM__dV8eZ<#=nSXW~*kehPEjPnQS#j&L@ z?t(gg-8Mi>FXY0hwbfWYW&`Bz8N)WUe4^)r3^B!OH*T>OgLT=KcpT)&6y_?-T7Lrc zzifgolOZzZ%yDjqzX~S5Zi82@M?te?zv+TBoQzs+@w$`xR_ zMLV9fE5;kG6LG<3JL1i$!M4kkz=c}^NN!6M=mu!v>Qqw@yS)s4th&S)(tR*lD1*Lz zh^%Q8oe2$~``HEJUJXITb$zn##VmAu{1pByeF$r^xV&MJKE9LR4BBrw=h4igcy#ko z_SDIjw5N{aWn3#Dfs4n1r2QA_mXeHSLoYCBR2+tnUE`TOJqTsn{Gnds32)4mt*G-j z0G>sglemD{Q0=b{tE3d@&($kn^2cX5_EfX2VZsmg@5c>T`#YLW+V4bWs_wz#8>aEy z(^J6gGJ_}d4TaOAayeFDF3t3Eg4?2R*kyhCRH{%!Xw&nb;IzYjRIa*00=`TK?;Q__ z)^<@?ty2j{XBFYuPg26GBEMk%j#B!1>o2JAJB2dqyBOyqvD75+Eshs);%{O17GVU=$HQUm0m9GSC`f(JCo<1dmCPxwCPoucyE#mT|gL*Y*^B(Me0z9uql<vlXzy+ulj=0NAOpE&gNE3M8S$1fh9kEf3OfXJ&InD#slB?ivZj`eY{ zyLO1EnPfqY%xT6>N(|G_M?rUlJUi^C22DRoc-K`pU-gXy8q=-ExUSEm8^3ll8@vN? zlJp(y6-MEb{*%P&ehB+?vJK6!l|sE)Gx4sA9p-Kc#n`faw2!}piR9Grz3OU^c>F2z zAt4d;?sI(CL!Rho{FrLbi(@ktCjmXrIbXNW!qL}8AU^U|QyO zRri&A_DG}tlLCl}=rr1{#AW>Ywy`m52B-&ngzOrvAp4G6kV+dV+t*x2$tPz{sN@8G_7`~inO|C8%C3?%PNU}>YTQy+~ zbxA!!^}?pv#tm&DPwu=WTYPR(VadOwW zosJieBfEb@pd8mp$Q`YwAu*HLdS7MIe^46Ts-BbQZ+F|Qv+Kp7EEjZreh?b&>B8uV z>EI*3kqJmQrN;t(vZw8ZTgWRoEvm?f)g^Qi(#VM zY)}}RkFqH-q<4zCFev>2>^M7vu3TP8F1VL~MotBeS2+n$sjmRH3Q=ooF`CDT;E&mZw8QrimD*fRLZ_T#b!VCg)K?kv9e1U{`0Z`B-w%GEE6;=yH^~uZ#8Hj5tlSFs zmyco7e)vP2mmVanbB59B4~WzbFRnw=NwkCSk&BZL;>1BO*5o4R&`7#NH>g{)62bK} zWU&fS(~w5z%|2{TNjuv$7)|7MucP|A&e97*v4Y%j)>Pq_EQIurgDDD*7*M0duUP#D zELK?vRFApCnYB0RifU0*YM(?`U5}%WZW{;+&MK404-WWr*Lu8PGLQVxNx=__^XUZ6 zr6smKnd%IDrx)|PAW8igwYcZSIt@N2lDe7n*uNC=`CT#0ziY~|vvSCod@VA;gzEt6 zRZ-oOF);nWcDnJKB8LCSVB-=*sIhY+o$-udcEv_Ksq=t6EHVjT2It*+I*Hy_Q)ZVx z+eQpFSi)&lc~ zWnj;&dvs~gMP@;%37&i5MuxbIOhvLf>~O7z4F^)e;e9Ja&*3;n9m}Y5(pF5KV1TD8 zAECm6D)Re_6>L%Lr%Psy!vd*RDi=45MN;8p-qknoi{nEayX zez2HLfo3ouaeEcWizBmf|05qXTe6FGz4L?m!xq?kH4cMMoy4#?x$HBWIHoK{gOnzn 
zVxM?=lZoE*n31PS5S1kh3)4m^2Wf-;9t)gMW=qpObLbU10!ODR!!55@BuhsW`^`NF ze(z;<-pJBlXGHMp{{4dJC0D8RRVnt)jY29DWyPwMa^H^ckE!bR4!9KKi$-dTiM@3x zlNl?8Db4d>RpB-==qyRClp@f8V@Gv6>_fp}TMXT6i|sczQ|r~+Xin>Fd?X)D>=ly9 zNw*&6(bO%RH_nUR7*%I2i60D_KcUhgdiZgrFOgnZ$-R3OlSB8!iAG2c*@cG@nqpy& zXFj<x>H_K#-r#M}L-c#H|A z>=)2Go@MlD@E7{-K`t6{Z^hN~5@GuLhvdU|FS2IXp4;ahrpYx7_5S>fy3SY!UZoK* z@>Cn^Z`Kj#q)DjrYA)8wBDvkaohN?z9TV$m4^(q4$ec(g>E;sTnX@z|OmbzDKifjY zgl-(0eSx`n^9)80q`=lk*#hB9S?Io^ip_eK^jPm%W`6fIlIS;!797&PW>v8gO(x1v zUv5V^LG1+g9ehoH3vC#WaXFBcH5n&I zktWYE{6Q(sVV0x?cWT1Oox_&}<(hGP`tCg(>OO%L3pAm%NMHD~xQWY?g`vXhwRoUE zf!TQ~!nR@JO!8M+0ZwQW^qp~-yt^HQma3s}T<&WEv1b zu~?y$sLN==p0gTs=e3ydI7RL~m@)n|)X-B5)D+h7k6<}{41RqP!la~Ez!mi>_yx7-^pQhfX zeh|Wuzi%aR)gy#I zJDH>oA7-B|{6MD|>qA}JH5h;KIg>GQ9AdE7GjPHgTts%P7sfKKy zauy?%6Ommqn`1&%;Nqi`QC8Xv*G&yZugy_RQ+_8#Udlt|r^WQUaS>F^|A@yn#eh}O zAdFZ0jjwdJVgJEmxZR=-yF#Xc+e&*<*}aTLq(+c7*96if=)~eqCz5HwP~~Y)(Q7Wp zt_tzTUvr8fRe2$L8Mjdp6;on5m$Gsv*OS$oq-pM|boRN7B%ShE0jBhnF=4MMGIQOb z(_$Q5u2YCBleeJU_6yh%IsrfJy$Rc&KOzfUTS=5!Fn!A!fnVoOGP~0T*S7f3drvst zYO)Qn{CJ*~y8MG?FJ-ts<5x=AMuBtS5q5glc$yTh4A!C6P_I|X&d6r)Wy27u+A_rO zdsyb+sV9t!ZV6)$DTV7L3I#7dE@Yzqt*QGSKNmikIlp;EH*b zbx&I(g$+0Kgbt2bP@=GuAI5Zlm$|0!&YWlvmG6&zs1Zv?n<&% zm6(UUgRv$94u1CJK=F|6N{=n%Q|o(b z(R+btzHB3(w9TMCs+1T`pH827Z$o$70I|DI(0iWQtoaQwoKZJ{n%}%g@@k9dQHPc6 z{!kIfYBOT0dF$9n^O^WhVJVC))q>C4jNxRfEbPhH$qarx&X$RvBlA9L5|^q8@cNA$ z7A1>gx$k81;P7$~y_5w%1HwsdRXCB^=YtWE;be8wR=5)Hg0(XyA$9mo)*&zkNI+ZKKWJ@0A^K{plqgst9mX1 zcSR)6+f0Cs53v1nu7JrT{%~GBintqmAQ$U}s9JHE+%~^}&8au>Kh1g=`8FNyDkl-? 
zJ2T+p<6m^)$YnBbN)vQcYodyj55B7a`m1^gu5U3Um(!&9hbxUSYl9*jJ?oDdeaDG- z)l9l0*97C0OL$UvkjP&;!~E6iqh~}lp?~>1@_Fzam29ddM;4tR9a%T%_Qh7jQ_`I7 zGMP&%EqzG4dM;yT--92+{qVe|ES78^3$pH+IPs}5ZC>?iq4@0Dmw+9&qoE_YJ2JQ>vw8Nk(D`|#S(QH)pR*n8S0!Z(9qqLLk z{Pf2M50o%rYdP{)^JLfw4yIca!ysu^E z`@3>kRB3*T#YGS#vCwig2L6SZ!tvN@rum#LJF>2a?i#+%W^><2_0Nj%=<{YAE;tGk ztVLks(F(Gl0jFVUY&h0FCBUJHsN};9{l#r58v8Pp$eD&qvg7{NwKyJ z6*7k$YIGY7FOp>}1mu4pDWE+4m@Bl*Y>~f-C;pn0sZ?kkABl z>#Ai)=zdV%zY?@N6rl9!Dolw~0=Jt182K|xFydSe0V8>=*z>P+Uw0j?b-#$N&Rlok zL=s;9GM?OB)C#k^oFG(C2}#*&!Bo4ME-*63olTb5xwnMP4ZI32i^8D!{S>-vW*2Tb zmP5r=$FRP$&y&$5!t*<%JSN^5DAUp&bYnZ&d@9^v>4V)Q~)FLWfysE9=S!z5<}U0|K?Kv!Z_BvL6qE|o=-M+xp7>EO7_sx zJ>b)L8%kBb@YV_=*>MuL;hqdj7XC3Nj^PK%qyHX|!}Go)9XxG=hq9K* zxX1SjvHCCrd-G+4k7M>g*?-#LQzI&@vgbJD4kChH!A>-6cthD@HNL?&b9mz{AxzfO zA%;%HWV@F<6bu}NGc|dzUS=jg_;~|)RawZc^yASy@$cm6uQN=soe8et=DIn*Zm@YZ zxgeaB2l-{jFzUTB~=XCH)}`@?AxmxB%1*F;(vBluvhOYh2i zkt!yVq&ld>>pe3BC95MK=uj`Ix~n9#dYOsd2CDEW^%ZrDtYUAyx=1Ef^un-V1F99w zgNHZ7K}_T&{g!A>^hH~^-A5=C%1!0}i*lub|0F=ffV)2*(1nQCJy=@s40ol7g34Ae zvh!~v?@de;)Sh?<84kT%CUg$Jj{0L^dKmn3;9;R^I&gMivW;W2TJ4BtKJ*`;%C(lX zIJFQxjod&avyCii$iO2rJlJxJ3vkajAOCcZg-FXD@;oXS@;1mqk{E#M0!3&UyiXQR zRKm+Ybl`--8uCST8Y=B71?Rk3DD&n6D$KH?-o~PQ`}5{FCvyoi(wBz@WoLNq$-5wG z*F3tj>!^prL4dGE2H(dQ{7;0j7kze^0^hmw}omVuSULv~m z9ha30zjBQgEK|TQD=!oOCmdTpUM(d|Ts4LLE+`LDl8BC%xWS{&Km##63d&4#xrlL^TuC~C{X23w8` zXnc=VHT+3zN5jC&(;l<8^|9SM_s}yZpU{P~Dp9^J4cf-15bZaubms0HYM3?`eI-v2 z89N{BZ`_SvcxfEVT#-$>!`&rg5<&CLI*8%A9D!r3*|Odjq|zXo1YF%or+cU4#Lgc; zW{nlbj>#iR$+~dFHHF=Cy8;^{YVkcf!d>f$^psE;-g6!MA0-y>z+w|lTE7Kp;tkkR zxq-B3@UZPw1jJvxM%YO$g7)z%@pTfVGY4eAb6A$VpO!}l!@Q~2*>Oz5Z8@S9euw>4 zxev83uOK&X$ikHfZF;QZHVzntkUj6;)1p6daD7)PbIJbmU28H`*H(}&?qPE z!%NA3#|GJDnH{`Tr8Hv8^bq%z=7Qq>vtLY*;uCHVgOEm7zz)Px9 zDAjn3UG{;?N*H#qEgiR*HAcClk=;g3HGdGb+(0guZd zhL;DBSA#QHbstrlwDmZ>Ilr9ll*ku&^-3~D+3`@LuEDt(gHR*M0A~)AzzW|~!d@R> zsGJ-wJbWE;gr*Sgo(wCS{|LG^CDTwXmP|jTh%e97Lf~>~wyQxNL%819PtKR>Znun{ zOaDcrt*y!IfIzZEc98w4zmGUDn=rh-m1vp*Vp9}e^goR}BQar8QVf;4QbpRHTQFAy 
zyJ$*;IChq0FeBg8P;L1?W{c8YYCe4lYFds%v%}NC|8WeN*gcNq6-?*8Kc+Nlf)?2@ zvV~fRc@u%N6FpX9Le)EGL*^VV$8ocpY_y3)7n?iiwokyW9X|npfM zuo%QQ8RE4u#l&?mgP5pxkPx*ytlYg>=p`4+IF&}To4UAOW!G`)WB3qUN2=IL!FNX7 zZ3>C{XUo%{dzGDaOA2RymxjKb6?ma6hknr)fdg+!iOLyooRwYAngs~Zzg-OY(W034 zk8{BmA4BV<>ELwX6r3crRNUn{UAy}*bJ%kuwtbz)^7ukFZ{9R)FWo^_bUZ)<(W8*O zF;`$@wE(8Q;hZ<4lDsn~Yp7AxJh-j2oje~?&u&uSxOlZSEE_K(DA%8jK0b29Je9kX zKKV)&+ix(Pd1l0+DHfAr803|(^jf?l@Gfd{?BQlAW+MUvi*so4(F^2LUk<$0xkOe) zucwn`Pmz2BK0P^VMta@2c{xo54cGl6c`J$u{2Rkm7AVs{{-??6AVp~4xiRlgMI!9u zJZ&Kpu}))@9hvP7ar+uc!=f2r>2D}}@@NbRC_>hlVaffOWp&1OSr~tpbDA^zaLM#4 zHnpaXB#bmNdI8Ebq-GOw$kd|$IM>Hs4S%BMXF-1&bg>Qb^U2>6SJ~bzD$p!!B$it9 znYNwR=p?<*b>1psWXqxJv@x}pp0NY6Gc6B;h95A~OpoJzCs$ebV+LHAvXUfADmND$edqi z5keG%TgpRW>-2DJ=KPP*Q6c~?aj2{Jgt{;*$*}7~99hs#BffDA5P~!~KpW*`IF4)z zw@bPBmJQx_gAPBIW3Ap?V0Jy6&ZALn%u~11m|h38No|N83JPJ(O=lo~3&+8(w&YmD zGF)ao6DM$f`?4krn8-|quexJ!rbh}bI`SX1DJT=I!XQ{Avy-N*swCHzr<0%R_1J7S z1B)VU@YmOwxI3TQ5uSRFIaB&*)+x$1|NOx0R`=#zS+kdU`+O!-&3D2Y*LIMMIS0?S zrm$C=JGoBS5t^=54PmcV!>pU)5NB}-`~Mk0|7Lyq*XSfPdKBXX;SQW;uSm+Ly3^Im`>=U3emDEU7vr!}*niNxLM{t>cc<#g-fJ^Z`|7@RBc)yq*rKW~*tenH=X3 zqx9>=2(YLP=k`)Dm=SiKyib&ej6WA)(ai!3w&nIJ;dy%F|PE?d3vzZ0SZ$XAFS#mV@+#=rc4eszxWd zejGnp8V0{E#}hH%v1^h%zeIOEDg{i(+R-)=`-G)kvoGUF=m6C(wx=s)x8N){o0eP{H=zH=O&`w+HK^t<$Yuo*Yiv{*6Iy;2YP1H zN7Ao+ji|k~CbjR6)3=e=shVLZx#koL_G_|n=}v9$u&fusSLxGQx4oMtebWw2N z@>0%kk6xlMKCd%GViYw`_1n(+bM+O#6YgiKZ#X3#nRm z7fDERgbUwRQhB8pki@Z->_*2zcvvL+D(n>fE)fN0Mb^-tl0{{o*O1OJADF!|8DPfc zkRJt!(3cYy!HQpbY~t2XGH^GMDK`_)HRCQ5>-0$ScFZEKL;aZU5uboRZPwC=nt!CO zzXc}VT}kK9=5zZ~JJk1?P8(c55t|iUhU<+lt~LD4dX^=FXqOCaT=$9Ycr(bVab3CA zv*+usIH?n{*W5f-G!f>iUn52z1f-C2OY-ELN$xZ*BjIup{UbHuvZ*QV{4t(Ctson% zbo1->UQtH#@?_pL|3_ruqhRPuU4XY{PeuKow@DJ`+6{4?L=H7|Fo8SrnXd6t%*=Ju z=zZVS)HCcJar{}%Tp#QsuALkkaJdDn&6q*nC|cnCieFqu-j75_R*<*$%{+Q<0j>yk zf;-h0VXUbp46Y4eRWMF zuzHF-xbO8N)o(+YxRy?yq(_zPmDfPSx37uph7^v!qKgY}2GG8eT6Wu&J*?Z+!&E_} zm^Q66!odx@X~sWC@=J%~Y0OuKAZZzp@skB*m1(f)r336M*~=R$EQW_c2~guz2Yc)u 
zFvkn-uv-GOn5iNmxK3RKs)X*?6=s4lkL4i!wJqD~(kHOWOhnPOdid>k6Klft9{;jl zxJ2m*$(|pLzRAbI*82{--0Cgw<%OHHdtiX*a5G`G)GV0cL`h|$KR#Nv67R&#Aw5c^ z#Jo9yjEWK>ulb6`kKc^#^`ng1mk|=$u!nw6;kde7=D@#t1?<)1x+}@@C_Zrm#+tpL zJ|p|V=l(ZxrTi58nafISPc9*klg?7bIlq}e`CRJ1!bm8Zc?u_$jFN@>x1j5YGS2wW zz@9&84Rb17vHaI>u5)z^)~)I#ma={@RLU{4jiOM*s+X+dbNPteF#NS+4LmPeLosa~ zzUpNdU-SDgU^5XG>N?t#WI1zh9?JMIZYsu2(}obo_q0oW8RVbMVcix2@ucMwv))VIxX>gG-yVj8 zOx+nSJFd^4WFJhlz8@oVq&UWk##h>Zs+P{Ojfa%83<_o*f>p_hSjF2&#^?-?vcNyo zeD)aZ3>=FR>RVbX2tooAGrZvi<5?~!T-PL)Awf8< zdLyoJ4S=xzT+&vk!2wFw&;*GqxX1h-yGe`VHovS!owP{2dwwN#+wX>Ft}h_{3k~t= zb}{&ABBU}pDfHvtGAIx^jCSE#tb|FDV62ZS-|K-Up7t?^(_<5vB)=G(U!RElFTG^& z$y8Y8vWV6n{6OyCE5N+ZSty#b8fJ5zNxhW$G*;9TP0pv1$Um=P2h{R~jM zJQ)*HUQjaA4K^O?CkyYSvcG)eNpt87tPSrYn>4n-{4d#X;(-o-k8WL?B~RgUSv1EtTD!&u0yc-R*Yc0j&?n{e!WUOMjq3==_BfDypQB{L z>ShQKkreu{RaEKR4&rX?P5$0W;FUg6hswPp*l>Ou@~!dXtR3&pD2M zR8g@=B=fYVlY{ftp>o^`_`F*iV-1dxgwXq(hu{J^2{L%dYbH#-zMFi?s$qBRRRE2+ zrI0)h>4~XUIIcVt{pPjfTIps|tvN{K?Oe%?%zwPN^UX|Q(+Z~Npzwbkop(S^|NF)@ zM1zKwGD>8YQbIlFzMn*9h)PyQ$==x_G?kJDg-W|fWR!Z&eJ3kaM5qXveQc6Fe&_r9 z^Zb3@=e+N6U9St=UN6O|w{|j_?j}8N5hfNqp!dZeVW4FYzIHdozQ%j$k&T%!Nizm? 
z#x>K4*CwL2Q40K8;0_c2MMJWVHrWap;+jd3;*k8ka4+*0go320I(HCva^(dZ9e}XW41Z( zg@x5AsMal%y)vEg(A#Fv-{C_mH-uuvqWj`%_vhm5`WB&I=kpY0pP-m23xlCjcBZ@S z^#0_VBr;hFk+v6;HE@@#@7yNK2oG{)4Wv^XOE#Y^K9~)W^euxk0#hVt-r} zc3m95aUIW;*cDqxXmUyMDAepZ7JFXFWzU`>EE-|SrF07)gzXhS{hmpB)AvaEMTtV~ zl5V`n;iTY|7yhWocNdO%9gkXrH-XodQ+({(4thFnfaEJ5 z$eUCnL08?E`vsN41Y@~)`kb?9{yvWG)E*a$AI!&@_mo)Ybs71dJOc+@;~-?=afqn7 z$nl@DDd78Ix@M)qdS{1I_|h>#H;)`T(mDYf&p7kLQ@6I_(=H>?&$N~rmfok`drv|{|1BJSN}yNEPtc*b z=cN1nvADCB#BNVYgz1OVg%M5P;rG5^G27T37wGQiWYeXbc<=)q{1M6XW75RN3Cl_C zP7*#_TSM=Sx?|jZedyey6Prwx_Pe(F;Foif^SyWC`0pBGhI=y{i#-8vmgy*h_ZZ+I zH4mPmAHg}AZyhzJOTL0VD&S%$@QKnq{QW2m2d*+#+$dNN_6yDlUsF;ccFi(y*`Xx5 zGNMNLF8&@nlZx8HgoPSm+}2)|pIE&XX7svD*^8saw{d+ywP6&tzU|Il68G$Ong`B2 zIu9O>o`hG$F?dCEmFkZZRE2#cKZz$7_I4nhq9lHL)r`OOjpOt}J+AuQ1g^i*;KtCo z5+CF!T-K?g>0P!^eg|{3c%DNKOgnPaurBC7;VIi_ByiX7cG&8+AD6xv!>T)Vc-gHv zV!Z8q=&(H$R8P+$pF8`};qD^1b*Y;D_jlo(Y3p$R^1UvWxGpt zz5C*g1^al(jwd{6a5nw-;ye!9J&jJsltY8kGLGqY402pT@a)TXuw`gJ9(l)>L-j7e z>1VHC{*x2Ml>hj( zS18o$jB~qwp*horV_{|l&%ZewJGt%`GGn&0&udHZq*oF3+^Qj#>CfTfDm|K8n=91# zctFP5tvI5r7yd7|MJHtsTh^VV3j_36ZRTqlo4uG1_~dfMrum|GM7oq)Isk6Ou0gl? 
zalA$H3Q8Vw>@uVQgIouQKixy{a%4K)?Hw#^$n0I7-~Arvn9M-GzwLO!!Bsr?wT`@g z>2CD%>dNC6|DcPDeMH%a^-wYSkkHk97$vw$z4oVmc;dM}jy`OH+r1n(>7X?(N|9J~ z89I>Z_Lbkn=2J!CWE@)Wj63bz*m}n;%(r@uN&AQK;cM-&`F02{Z-~HY2LDO3`BALc z>;+-V*K?J`5ru!M^6>_RTjsMEYSQbv7$92OK4F>pGmL}CmK~(WW;w~20WA96kK|ZAiw&hjxoq%%~ zzQvBG{(DT0O3&D-EdVDdP{>s6j%{s%2@U}L2)Oqx00 z*q%lYE{P;0g)7#Mw?GLEDtR|NAbg55-IHp=%uoI0GyNjLv2r7wvzj2be7i(d9&WJc zm(;T^Hl_2ZjL8;xf^K@c%U-ma_+DVUre({zq7}ZX2KW!tsE9boT0vgHU@=?64#2oJK6C_?I4Kj@IoV|UQOQv0>JA?Dg0d$PS=9YlqYabvuJ(yaS~%lORA@aQ|u(V61xlfW$VPqurCV7pSt{L@;@pcR)*g8zu{8T53%3i`O>?W%R;&-(XzV> zq|C>|5cl68>{xwN$Xh&^n)f7A>v1)jbnl@erfxSF7L-%8r<9v@QjW7@rqX)vTqrx6 z2}NJN!($y&oRMnA6JNKdql>=7#oyAuBQ;ix?_?;=ggfyVwNZFJ&y6N|IMZ44!*F>| zq;R+H1zm0l7m9sVv9`HXcpx!}-oLVjwm}DJ|M1?l-Xja0y#uk=i%>=7`8V)))p^*X zHV!_>wSiwPC zX^#w0#!jZzM)R?Lr~!Umk_Jj1#m9 z63KC`J<5Il%2pXVbMY?&&Qf&eJ|&BB#}geq9(tHe<_r|SDxDC+2A7ecfy9Wt5Js0? z9EI?dZak~gPUtamh3M7iz0=FgaQJgNm}2&I;AyXaQ+8u0{E*(o;*(bF^gso)M|B_r zk1-T*}&%^M~)%4zQCLe?%iXT|Prb8q6VZM@BvUoDT((&S-=a*6E zc^6@&`i%0XjNMpL=M4v_QvBL;1r)5b2Kle97`HNlPl;#v>meg~j@vA5KJfg8SC!tqpD2**gA^b_||IX7@6IjuG0Elx^YGua*HdTeylfY3h(c{fk)1-q74={I92vZ`cHY|J`a)W(??NZ@LqTq zzMglin!|BF(#sw;ttIui8@y6wmfT6#UTEGJ&24vH(46H*;LU0s4l6U{l6Lc;`jq5p zv@|E@H!=)a^^KMWXvybHIfwr{ht9s!z(moJ4NnBZe?!$pt1H)V4OjY68x&!&x?|E%cEZmF0T!e?$7Jj%AQGiAzvy= zP-RJsGTQ1@xR&>DRJ zV^0nvxz=9_s&K~amz}XX`48lT)WNWiZyq&Z&$qdD64J5UYa(j*_&1TO0e~>%+_q z(HWq9{;u#%dp^zHI*>w@lBu-b43EwF%H`8`q5J2zRMQy2%T}~N@L^UA{jq{iP7C3% zFb6ESRZgxW?$S3WTMCJugb%AV`G8*zSk3w(6c?|jg&hm%N{|J%{%en&?hoZBy$5ru zwU#_3Z5z*?ZUVMXiRPOf1;HqoQb*jw?Z)=d5)jQE4w`b~ssi$Gcj6i$89NlsAlj`2 zD^No0tyquWr>&uGL-cs(>RfV2j3D16PgK;~;)*qzv|-p=_%-4_IK*WNdm=w7I?GO? 
z?dI3we4m?Qtd%oG(<4#!#}uf~k+?RJ$87N|S7^T20sb0ALR#`{u~rdDch~x{BHuuq zrR9rzt0#(Y_b+C^-!qJ?Jnmsk#WB~O8~AAXV2_40}$_(0KC+KAfZ?6?zX4CBvS7xLdcgFd{G_Pnie za!;xygAKRn^6M?!Yfm4XbL*in@}V_;8@~#Ui=~Jjt+XQ1oJUr*iA{P2Tzj@$%sLau zFP5Lg?UFC~_7RCs)c2-%@7-}UxAGMO9Eah&0|#(fS})n5e_e6n(vFZEydGxfMMzrX zWc=0F4n`eHrqQIsopZ7v?bk5ed*>(^JP7A`8}`G7epa|DPy;VpcalAva6`Pa_Yl1{ zYoitY=F`AG_K;Ni557yf!<@dBFD#HT(#H+*Ih`G@Wyl5CvYNW|CMu{MmFv7n?}~}jOong?Wo%nL1q5O zAjGy30#_d4)k|uej*rO^=X@LnMhjD=9F&freL{^I*GPQx4}k$?e+)(>u;3pt+^)HUXqnxj+zh0&h5g) zs6ad+s&l7UJ7Gw+J^n6Q%KpmcoOUr&_`5`;L&`V7XjCi*-c97dl`OK$B`9=J2&3|+ zL;Hkc7}X&1UDZ@b8XZnK;dg1Ks+4ip*NA6JJApBa4~m1T2I2sd4S3764OWkHkm~N? zT;FdR^zMC=4qlUVQiBb#Px5HM69YNv;%WZ<5AIu%jsqJ{$aT3`?qo*T?O_cK>6xT6u_C1bty$&vd$D{){%OZ0EP9}HkU-{zQ-=JH*q>HO9q7#oySih?|=XO(bderl*xJ73g?;j8VvbRb?-7Xi{jTdBZ zTnINz3#Juo_TiIV-pCr4q`K_|9@L*mjXM?id*=i`X4;p&M1}Fu`qA7+@{tY7$-~dx zgPn}Ldda7|e#GWbFP^5-EPQ;V0vjs>#QyeC{NCm|ja%T!J=@Zu$D}e`n{)&@*np=7 zWBKyNCjQ=I2rgK61PT{B!x2yW@{%KK(X;11&I`B-Ur%2I1Fbx)>sm|60}c3F2P15e z5Ks{4b`i~B-v0y4?)yxfdEQ^*Xys#O<4JtmS%3$> zR5ASLI?j~5pHa?XVsY3Lr_Kl)zPo$`4_{%!@=LWspO!3KWMD%DZb_JT=^kx5 z6pgpE+<9)pLkLKaNqio2EIEFczWj44`*1sq{#!k6wERT{Bn?qT_nBf^4H?{QeeNeDhAc@J~WRM_mL9(|8k3m+H$T%^!HS9sozZ2G++ z8{W0g6Zcqtm*9HgpgTl{Us{*Jkn6MYtcc?MvF6-u=V;ngFL|b2kKom-5~p#|HL^aU zgy+^}lm3n;c>MHp+SJFG_Zdv3bd{a5)`)Ofb@X^>>gq4L7e)v)wF8@dp3kAVwfKmN zg(E9U6|?(ofQK9HvAC=f)P9fP%=B->b$>-2`ApWiQpp;#K#{agi+^OE16N5`cQgB= zqU~=xGJNhR=5`Aed`4^Fq?F6x^mYx$<1gH9e^!hr>4r06ddO~< zujglV1Msic1!#$3==QslxZ_(aw4c$3ugeV~UZF47T>qYSc55S>US8}S@JI5&EfhAC zjG%@o#@O+?AwhUgT;mu{W0GY2w9o%^O($UDY?(>^?;lcdP!FM^bDsE8IbIxP`w*Ck?-W$k>(ek8++uoDcS#NwyAP{Bnu#Vo~)pa(#@2rO7mMU{X)kU6aVuJ&_es)?jr&yZd`_a)s zS@6xf3MM}_Cg+9=;^hgwdG5e?eyEx%On;S%>x_4y>2ZXcQZINeMITCE9YV)mQS_&w z5(77OVPEY(!q&CX98sVPQ^ct_wtq0KHj*mg#t?C9#woF#yAA%B+(d1mm5~0{MNwtl zmrps{!-J!vag47MYhKW2)vc+}xb6pSF*FiS+hpPn>9xAj8$-2!xv~cdYWEYMB=uU^^(r}qp+aB7Q1To z=RmEa=yAZAn^xA6R;$FOEPF^heUt>{Gss#OwsUK_DnCD&Pg_T&3052{_E|l=sl0%;@0@XiKW^X{cNQ;hP94Ww}LnopC>>vDO 
z;Y2*38jXK1bi#g4zsuwk9z)<7KRz_?AN07_9j7EZ^7twbE?J|-A8dQj$o;*c)peA3 zrX1P$$`9e)!<|%X=P8t@4#AL5zlDx%~yw!OLyV(T~eRA ztqb(Kw+=E!ErU6}H}OrD6&`k8BY1D=%t7w^#K(X1xi=xZd#*;^tWV-R{g3o=h5;6+ ztY%s}o%YPS3g0CCP}fOe>~_W*;`2#@Z+jTXn^pv&I`mYb;Zqz>wMwPYwh zSk;kkJ?M?&M`*)^_VK*c_y7)(IM#C-JK*`{Cxnw~W~iZ>%42XiT?zZjmVJu&>YlTF zF6%sfHSf-$eorZ{G?#|g-V*RK1;4G*@8BeVZ5?0$ZCx{u3a6@BzZi{+E1GN2k&iGf58EV7Vnoo-ZuiV6{b9w^3lg`Z* zKZ%ocUa0gll=a^BA8kz2!OLSOL#U~z+-^nyCaayM4H2psV)}#NnFm(L$3w8kX&4@x z;*^lDPxId?v+}I5Xn$N8l3SBl(^*cQ=MKOQwTpbmtqfI`>SN6DJTCF`!2!>G$+Gqn zJ9ghj-3)8XK99?yOV&$pkjn^ax)m>eP;95g>n}rg$w)qSDwz$ukI?DxQC!y9gr{8E zgKmYntlc$QdK+zaUnHdyIsaq0WB7~+84RjyIX>?MJYv>7-7W~87KYP zML}!F^SBN9H2-D^<(f%4zP=;r@4F82pxMjN?B9FIw{A<@S6JYbO-mI1!)jz&LvK^y zv;GLj>Pe--RMxKUjo8PZsCwBOs7!u}qt`TvgQD#DzE=RB&bxrwo7Bio>na|(yn)Tt zCh@g+z}7?8IeTU*7iO4XRO=M5u^x-<_4dPv6Y&(Y#S>$iYN&f$2=i!gfKSlnWCh*np2=Z&{j%6>{d@F(fXkUhZ# zE~;vln-0@O&5|2XudYV_G)7@|+8C)uk0tf(N}|TT!*F)tHPQU`eE1jH1$;HXQu3Br zC(f*@l&`wR8_YK`m@&rfFTwp_${_3*frTTb4 zYa#1)uBXgDGX-gf#69iy(u!y$x;UyoiV4zAyw!qqd^_OrAD(bTyO~t-dvf@x+c14t zu`pxAU|yE!$})*#s@?VkhBr>;Czku*%k*84*8VBEANoQ|zaE6hd!z88aUjXMZ$!Tf znQ&R+4ZiX8W zCOm=uUk@m*6!&FUm2hylzMiiwz5>0%!_XnBn~?JRILEJEgi9sAr`NCtqJGFzT64Gy zuKT;2uZ)VvaE%ZlVX=~w4cm%?`zN#Uh{IU1ARV6_o=kuGBD9xpg;?|B_+P+npddRvrSa$0Hr?~ZiB^x-h7I);M#@l@TvN;EM@2*&Um5>NmEq> z`aXy3Q!3$jw+>>HumsYE#G$8#1O0h&51fW}z-yuoD_h@?_NbD!t6(5yqYtl2mg<`C z$MM@YZSH^9hTp~+;EK10xY{lmN7b0(vHB=FbZrQKZ76|Bn>&!@=s20vSV`;pqC)(a zF$%kQpCOHb{!lgSm$b9BB2nu(RSdG`6ob2ps(C|r;($6ZGSH_$gW)tQcp&*@&cdTR z9k8;G2W_5M!Q1@mWm!34X!m+Hgw)kI?dvrSRIMh_lYT1YT|>^WhGi-2^$i88(4!Ws z7E=Co4ArbY2+bp3L-3^*Ui!QX_jyb3_+~GB6i|hR9U?(KwE+^>q;cieL6~u)o&2g( zPqeRa!M86Z-|yZWc0A~VY0;MDZQ0NGsBW|9RO*ahRw&_GiSHiMrX|`h9Lwvzs&j(s z1bBP*GS9E;0Iz1Qz)|BHc|>kHUw^Ac{(szIhx{dmq`2Z?n@7CyQw8^$bO-8gE4%7bl;zf zqET*Xu+F^)AE?yfn}0`(BM$ClZ=Wc7`A{ErZ$8a~mglkSd~31IM#{N*CKm?TpxJ#wIsA|2R#I*w(2KOkV)FnnGaPkju&iHA#LK)s-jv=69ra+-|d0?TgMH#$4 z{DPlzw77onVD|g_5WfwzF7Ny596EmA%dIEtdDC!D9JTm14-GFCF84OLOY+C!!M 
zW7G|rsxnw&o=GjcQ95ZC-4Po<4dBoo7Uh<0Vf52~E>+mIp~cWR7&`Pao4=ETO~-MR zH)8=9{~d(OCO3&=cS>yO>x;>Jv?)$k?n9eueu3q!T3UK3gu8Xiq8eEue+Vi@of|WBlsRS+m{S`Lr4WbK8O?iKf|=!p}fZC z34Dp`f6zomhVb zO|JD44t|MX)3#fn^<5KB&GHm;tbRes5fOH7wB)`&Ci6kvI9jFrTFANWN6u2tuk|)< z7=GnHi5n&e7R?NouAQcBg{RPEiYYF;dWlA!+YAM*+ZENB`(fr)iM8i{13T_A<981W zAm)-cv?_;URp)m6;M+?Y*|QUEu3U!);TLtQvBfhvu5>S{9f$M^MgN)*7`{-_V#M|+ zcgxf#V?Avc_G2D4JE}0+Pf(~<3V6amOSq`jDCYjkfZFPw92lM{x+NA0lP>(El@&p> zY(SjgeLE5~OwQr6m;11N&;>CjbU%+>y$R;WIg|3nRM;Tb!NoSIFw(CQ0!${c{*pax z5*R@DE-m0NDWMPo@Xc<#wIsID}}mxJd+zQmf{ z-k2u#c-D{q?$1O`X|{E%-Dmh(QVFg%FLQe9e{lMZIfZT>OeZUq;YDHj~f?m(F_+-cqO5bFRXMV)O zH06VW?$Vdy)~^oKsYZYm;r3$H@9n7F!yRf?RZ*GsaEN-oNo+dm!K>}UvB+*ere9kP zADcD`0sW*sgmw=eqv(qNx$MXGUHb8!4_>@h-4GYqt)SMTXttRcMxntD&?@O>6-&19 zi-*%;>z6aoZqYD|4r`OuDWAhX%ccJRXakI??!ZYKrg6vF!%(R(gNHqS%32e5@(t4y zn4($=ZWCi<_RllO2T06sZO^9X^2GGX*TnIGJINt89`3pAlQglV{7Cjy%DU^u4dJ1< z=AUHe9B6=j3$1xaei(L{+80wHk`CzEz>xLRNLfvdeQx(Bzwd%Da!V3b=lvryt$}PV zWaHesl^`$d4aJe>l-0RHaJ~10w$EJ-c~%|ien=Fg)k<~TdRJ^Kcf}@)8Zm2%Gt8+j zgW&AnV0o!8%zJ;H-mEXAAbBykZ3t5&+4=LxKi_06XWN6_m)CHpb{Y7I{=DCTq+|0{N(z}RrK%qxao zF&n74(w0G9^k3t#^;APf2c^HYY`!{O{tM1} zTOmG3e^2LjeiF?vS!^{}hC*#DruzOAY}LI{FW&`sCgk$$5w`ODQBoiOcL4Ui-Ap@O zdX}qC=~y1WWW4lGm-N<>Z${EP;CQp&AY2^)t+T^LZKo%s6BLCfzbrx33l~|wJ|3+#!(n{VGeJXlJJ4I4Kw(8_g5##tep^Q5HB^w#zZteT6tQZLQ=tZ=#WxuAv)`#kle% z=-Fcoyq}UnMIo2@#>_}YnnUMu z!m)m^=k_^TADhLYKc3Q|m3cfmSD$?z#e#L$o~%{m!0(oK=Na}&s5>eIi-!*89jb*w zTx1>fSa${wPYU2#zam=yGM2v&HZI4OgFN4N3f9+3d*0|u%)8#T{I2pdbk!dzE*@IO zR)3Od_6n2o5ynz}vczyOmHO0>Uq?7;U1xSljKJSJJjL))4r282Srih`TT#&vK&!fc zf`!}E%ikJV$VYTMOV^Y&>A2Xx+(9#+v85ifwNA66V;rmWHsJQpPJ!$7;TY6S4ZA&g zOp}k*(wN(lXQ{axmtS9lCyaW)2{ldB#Uhrub-)J4Lh?+T0M)kA+(Bm^zkbjegT&bzv7!L=&Me1W zi6YxtnDO(5?o#fnKwGx8-2CkfYTpV64} zPbnvK1FHKr$_#HDVpAz^^blP{v)JR*y=8@5r%xy0cFt;+nH4#8-yRJwF1hf(b*8M6 z@lHHgZNqCi28f#iMv{4nfif-w=2BhbNvCh zYdcXKpEjFL1neZWDYM1AC2sInyC)s6zbc0M1o7}DQ;10$CszLSok!6wSU6DpS{4M{{gh`T`V@v{#2&EZX9=ioCo8}_Q0XiLd9YoImVg1g{%%*81L;M 
z^#soR*x@>=XddLi^Y3YNxeI;2l7dB=E@YK+Qt@Q|IfYs8TH4tsM7Z(M4wmifMHBaU zk=dG3*m7kx-MM2zF!MTmaUa6kD(O5pAW|{1{X!lo&f$vsZXoqpR2Cr_MkN6y9;i(uP z$f09y;M&f{_*AuqVm4&K#rPZKKBfosSel6*?Yj$^7q7$L$g%jhTRfZaGBLmEmXOeU zCAY0n#?MNkVqaVm{BRh=>s&oB`h1+2ea(wHjXp$hQyb3>Y=V)(Q#5&3FYG_1A%5Ex zDRBb^D?W@@Vy&B-0LJyAS5ay7Y49u9a`^x`Pi�(Y@f_>L0XCV;4>T=`M_(*(4S# zCP3l%8^V9R3aI&IPqNF^2X*s<^flED6JOsDI>zeov<4fljc0yW_0s9l+y2l?94Yh- zIYBJ>Ds+Z5LZ`6;esVQoU)7b=+rXR8AD=J2m>x|Z!*rB|pZ8*MsTm=`C_Q1%}&$9eEP0-CmnyLKM zfIqpr#W6pF zt+;kn0ffF(<^G*?uuHWiKU~y~@2_})d$X3{hkP%r$sQo(w55v{e*eUuu3`95@=K08 zmrkdZb;zs2xXdqiJS8kN3XMizob%4O+|6O05ZKZPIk}TDLg_MO`FG*J z6T0)~h(*|K%~ks9ZvgG@h}8CJ80)V*0Yk63;VQ?|Sli8ndV5q-cVji~|65 z%cXR?gC+Sch=wCgI-u$QQSmM0p5PeQm3$XwqV4!jxKLpt7ToI#aCR!r4vbZJo!BTG z?hr|zwFh$D)3L%m(;2wJd@(wQbjC+K0H>}hhk@!3$X5DJ`rbVXQ*&~0)5eQ@=7gv4 b&MKUje>noCsq0~*vKHCg%qvrw*9-m+vO>aZ literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/data_0/type.raw b/source/tests/pd/model/water/data/data_0/type.raw new file mode 100644 index 0000000000..97e8fdfcf8 --- /dev/null +++ b/source/tests/pd/model/water/data/data_0/type.raw @@ -0,0 +1,192 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/source/tests/pd/model/water/data/data_0/type_map.raw b/source/tests/pd/model/water/data/data_0/type_map.raw new file mode 100644 index 0000000000..e900768b1d --- /dev/null +++ b/source/tests/pd/model/water/data/data_0/type_map.raw @@ -0,0 +1,2 @@ +O +H diff --git 
a/source/tests/pd/model/water/data/single/set.000/box.npy b/source/tests/pd/model/water/data/single/set.000/box.npy new file mode 100644 index 0000000000000000000000000000000000000000..65897e0f9c5ec79e1a3182fc9b9bd6e00799244f GIT binary patch literal 164 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= gXCxM+0{I$-ItrGWItsN4WCN~HS$9VUB!G<%0BRf_UjP6A literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/set.000/coord.npy b/source/tests/pd/model/water/data/single/set.000/coord.npy new file mode 100644 index 0000000000000000000000000000000000000000..6e0594a8030bf6c9d7e3e2a4d1c7b9e5b421af1f GIT binary patch literal 2432 zcmbW0`yKSLG75FhocSMPePwHe zDG}%MkyhDAaw(@}QcH=XEYf6s|Ap`Kx98*6$Mbo}ea$LQ-wAR%4G4^>keQ2TcsCu|QUiNSf~zKtfgx(c`|H{sc96>7buPIJ#z zpzPcOmZ79gU#n@*lNTcNu2Do^iw>1Pe+Jzh3%HgJUl=YGk-6nWy0|J7)|wyKPT3Z| zQ`RT-TXL2ecU;AbQVn5sWgRjX+tc^Fn2BVzi0p8pyPG<3Niu<=6+>Ky7ZQzqQs)=u zZi4HGgx#t<%3kEHr4_xq(74Q)I29!dOX*-zEgPhm_DLWAZAm&#xmepcgVd%iCm+Rg z5V$$e#%reZXsZO;GNPKOV#x2>1Frvfj9z!c&8qo?&Hb=azljoA1*AIlxGVn#G1=9e z?m|u4q+-kO|LQW+xpM<^gY6MtFprcZj@0{YHJ0vHqa?L6NG@&06g?w49BD>&i?pTt zEdFGtxK><`nMtL?JzUa&FNHl6z})8wekpy)epN6g+t>`ePTdEu(Ow=_Dfm&UMwyRC zuwSOn2gE%Af60qqRel|pJ>5`g?MB%jJV^h@EZ%uu6W%|aO;eXzayd+iK1Ghf?xPQX z^;a>i(UYf{!&Nx&bTcyx)f7fc0`Pry3tSS$kP#^lnOz{+`bE>Re-xZ~a9=Qk!UKzNKeY-EOMFOFG7P&G1u8BXL&2oo z^mhA6WVKpR_RUb(8jiAD-vmg-%P2f+8yvaYMkd#c<)SFE9zmVEVSvBVF)?2I?+(0 zJAOM`2G1+1MNYq+zZXbCRE+r!;PNVLTwe*s9q3BH*BhK z?tVWKY9>-@;&k$rt4H>7UHEhfsATC>!mjP~`cV!m66@2Bh)x6s26O2h0<=9OGHa5f z2Y>&F`KHg=&S6z@X}rKC9Gi;B?PX{fua~moix@mUk5so!WFt4jNY~entdBgzhQ~AL zQ`IESKYlSy4jkalxjAELatG_r$Yjcgy(nedQRvBAQQK-|5_Zf+;-A{s_n!i;^cOR7 z&^eBjB00Kv)r~9-%h020MkcNr{CIK>4lVH@xA#9|!Ru5+saIg7TOxEWnb8cJHk^(B z9oA0y2PHGHvA9{0T$Q1^HNjIQj3 zOLr@8q>ziD=4rI?uo4+wdBh#mZ-VQT2)?KD3KT?pU{>Zy-@G9DBvIxCjdjqf)+K+* z1fge%3LV=#hJXqUUanhA(|hIVK)w{O5@*+N8$L($~brivU{;y-q1lLr3CR4Oj3jo{H*EVCOJ5AYw7YIdtiD$fgHCep{u!B7=JB8 zn(?fV(;t3{mzh6sE?XO+qUk{S>Kz!pYfqjDdrA54hC-Jd&8p9#$^jk{ 
z-LJ_$>Iw8OyhV5H2fWMQOOu?7k@A-|d4R)1hdpEo?!EN*ywEr_r*-vnU|%2+aF7u!4L6&5}*0RqBS=eR4C^%~GMD z3!gWX?aO^}WH$!C^Psd-sKPFg8rCA;8B|wnL}TJTIKm0Ze7qV0Z4q>e z?_mBDXJpN>oA#)8(XDn0Vdg1YXe4DJmZ7S_UdaEI=llEXRa1QB;QfOgaDhUmW z`H3|Sa6hj_LVpR2x@$4hID`hCPo{S92c&o_khgIPdD)~vwRRq@ZcT)Njy>K?-iyDc uuOO#;rYP*6E9Cv|<7JitZD`h|thghHADP2$ukfd>(&eo3oe~yBzs0}CO%F=| literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/set.000/energy.npy b/source/tests/pd/model/water/data/single/set.000/energy.npy new file mode 100644 index 0000000000000000000000000000000000000000..a0a88fb78ae09feb17e41593d6d8f60084479320 GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= aXCxM+0{I$-I+{8PwF(pfu7z)39s>Z!l^&G< literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/set.000/force.npy b/source/tests/pd/model/water/data/single/set.000/force.npy new file mode 100644 index 0000000000000000000000000000000000000000..d5b847a86e3a5eea0476a8cd93210965b3cb44b9 GIT binary patch literal 2432 zcmbWr`6HEi8-Q_J$WjbRqE1PNk|>It=e`v&Dq&icNE~IdG$aivOIeaFh7`&cO_sC_ za-Q#fM50Nwn50rusD@FQrlv*By#K;`{r0)CRM0X|tRlS1yxo z-V9@3F3L*O+z?Z39`n}XeQc;dGwD{f@e=<;_QT{AA zN4n7_Baz+5bwN0Cu!>%k#IeG}zhPPJ5bS-aMKLWYSabL!##vd~owXg|?OVLjsM~|} zXRc+D3**sMuZ{eRJE*Ah1Oi@Ni~dW z^JA4;FVGE>a@^#;4TF0H(i~qM(iXhKcjisJ(^1!B7=dn(9yxob*sW{Cwh!!Xg;dj@Qs41=vT6D{C!efq2^*6=Int2!< z{*AQv(g)bpQ%%*YWI(v-0e7H83KbFs(#<=>MxD~w@^emX$3MehRwl$=K`*>%i(_8- z?Ibg!j$0w#fP3D@u#k)qR<+)rRq3QKeQ6Co9BSw4-YQGG_gPBWOItg&fajb!*Je9> zFNobsE5$T)WTsng!s35U0<*q`>g~?B7`n|j9RverqoHU9^wdGeT446j4`&P?Q{ zWRquLH7V`?z|9|?3Q;{Fe5OS>n-y`HY?aS*&Fj@j{-83sL^q3`?^gzW<%O_jrve&0 zJx!MiqR@rg1>)2y9PFGZ?U-XOZIMf&q@ur|eVz{uZ~_b-8slDtIbr>cQn+--i!ax7 zpzv#xQFqK7Cstl$b9OYsohEORQz#;v4XyAjhKF^2*ZJX(GHC3g%mUK7eBISLR@& zhVNotfE;tCEfGg4?|n0Ca99W>nh{j4Q$&yZf|$bcO7c`V#H_6DH{>MSaAvhHDP3g+ zx}C9vThU=q-j+<3Dgsi?f6KgAb;BfkTghJMpU53+sbp>mJwIeiXQZFuk&_w-)*CW8 zhf=0eG=aPWvmv)&7M&PAK=g42+2~KFl#W9qFi}F3-!lf6pprB=PN0rc^R{kiYVy0-D?0 
zfIdkdtQt0?nvH++%a1<6h(uMp%+?^Tf3A>rE52cO4z|Lb_$WHR`2vesQ^7pfYQllc zCK{>lW%4Hqkkfw7cdaB4tK5Y_%{+GLQx5IVXk>*oIn3tjS2lL@Fh**ng3vUX*ECn* zj2=yc>kg&_R$2UFgC3M=TZxfM+88Yr(d&*r5Z87t4Dg#;h% z&tmVaJlTfiCFB>-53R@6v+_wZM6u1`&^j-SSuU7&c`Ela(|Gw6H1BSOH#)I2Yr`4d zG@^qQ`(&_n(cPkvv?}&u!hC#ZuR%J`uhZz=6ww;{Jj^c^ql#fa>!0rk16s-SYflgUlZpE8& zwp^@02FlC0Fg1=8FCaXt{=`(TnxgYuT`I4)WMvj{@a&Q-o47L=Zf{nFKUUm=WY2i| zB8tcEor)9|Vu|nd5-~R?8%|l>=cl*trWn6su7gveBcmE<^T8Vb{H2s`#ASfm>@H?z zUBYdgW6dq{TY|Z^^D*+}1AMsIf|Mo3*fuI-_t8cIxl`U#&`<~7bTh|FN;E4ujY zp2oDN*AY!zGPsAYs_|=rE_u5KQs~Nagt{RRKF=Q8|8k+pPsgENX)J4a9l{2m_Os5E zwN%zoNR=0+;IniUS~{)_HhPRBk8c`bY?}e^`|Kfat-2JX&F7$Pw3ZJX>0q_9)mh4( zqa;eW&DCx0z#0=-iKO^Wg8pyLv2%f2c=bnxlHxmc%)4U=`xlJPcc%aV literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/type.raw b/source/tests/pd/model/water/data/single/type.raw new file mode 100644 index 0000000000..97e8fdfcf8 --- /dev/null +++ b/source/tests/pd/model/water/data/single/type.raw @@ -0,0 +1,192 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/source/tests/pd/model/water/data/single/type_map.raw b/source/tests/pd/model/water/data/single/type_map.raw new file mode 100644 index 0000000000..e900768b1d --- /dev/null +++ b/source/tests/pd/model/water/data/single/type_map.raw @@ -0,0 +1,2 @@ +O +H diff --git a/source/tests/pd/model/water/se_e2_a.json b/source/tests/pd/model/water/se_e2_a.json new file mode 100644 index 0000000000..96f51ba5aa --- /dev/null +++ 
b/source/tests/pd/model/water/se_e2_a.json @@ -0,0 +1,77 @@ +{ + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "data_stat_nbatch": 20, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-8, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "numb_btch": 3, + "_comment": "that's all" + }, + "numb_steps": 100000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 10000, + "_comment": "that's all" + }, + "_comment": "that's all" +} diff --git a/source/tests/pd/requirements.txt b/source/tests/pd/requirements.txt new file mode 100644 index 0000000000..74abad719e --- /dev/null +++ b/source/tests/pd/requirements.txt @@ -0,0 +1,6 @@ +tensorflow>=2.14.0 +deepmd-kit>=2.2.7 +dpdata +ase +coverage +pytest diff --git a/source/tests/pd/test_auto_batch_size.py b/source/tests/pd/test_auto_batch_size.py new file mode 100644 index 0000000000..1033f46d07 --- /dev/null +++ b/source/tests/pd/test_auto_batch_size.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np + +from deepmd.pd.utils.auto_batch_size import ( + AutoBatchSize, +) + + +class TestAutoBatchSize(unittest.TestCase): + def test_execute_all(self): + dd0 = 
np.zeros((10000, 2, 1, 3, 4)) + dd1 = np.ones((10000, 2, 1, 3, 4)) + auto_batch_size = AutoBatchSize(256, 2.0) + + def func(dd1): + return np.zeros_like(dd1), np.ones_like(dd1) + + dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) + np.testing.assert_equal(dd0, dd2[0]) + np.testing.assert_equal(dd1, dd2[1]) + + def test_execute_all_dict(self): + dd0 = np.zeros((10000, 2, 1, 3, 4)) + dd1 = np.ones((10000, 2, 1, 3, 4)) + auto_batch_size = AutoBatchSize(256, 2.0) + + def func(dd1): + return { + "foo": np.zeros_like(dd1), + "bar": np.ones_like(dd1), + } + + dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) + np.testing.assert_equal(dd0, dd2["foo"]) + np.testing.assert_equal(dd1, dd2["bar"]) diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py new file mode 100644 index 0000000000..09caa597bf --- /dev/null +++ b/source/tests/pd/test_finetune.py @@ -0,0 +1,380 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import tempfile +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.infer.deep_eval import ( + DeepEval, +) +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.data import ( + DataRequirementItem, +) + +from .model.test_permutation import ( + model_dos, + model_dpa1, + model_dpa2, + model_se_e2_a, + model_zbl, +) + +energy_data_requirement = [ + DataRequirementItem( + "energy", + ndof=1, + atomic=False, + must=False, + high_prec=True, + ), + DataRequirementItem( + "force", + ndof=3, + atomic=True, + must=False, + high_prec=False, + 
), + DataRequirementItem( + "virial", + ndof=9, + atomic=False, + must=False, + high_prec=False, + ), + DataRequirementItem( + "dos", + ndof=250, + atomic=False, + must=False, + high_prec=True, + ), + DataRequirementItem( + "atom_ener", + ndof=1, + atomic=True, + must=False, + high_prec=False, + ), + DataRequirementItem( + "atom_pref", + ndof=1, + atomic=True, + must=False, + high_prec=False, + repeat=3, + ), +] + + +class FinetuneTest: + @unittest.skip( + "Paddle do not support finetune in frozen models(.json and .pdiparams file), " + "will be supported in the future." + ) + def test_finetune_change_out_bias(self): + self.testkey = "energy" if self.testkey is None else self.testkey + # get data + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + sampled = make_stat_input( + data.systems, + data.dataloaders, + nbatches=1, + ) + # make sampled of multiple frames with different atom numbs + numb_atom = sampled[0]["atype"].shape[1] + small_numb_atom = numb_atom // 2 + small_atom_data = deepcopy(sampled[0]) + atomic_key = ["coord", "atype"] + for kk in atomic_key: + small_atom_data[kk] = small_atom_data[kk][:, :small_numb_atom] + scale_pref = float(small_numb_atom / numb_atom) + small_atom_data[self.testkey] *= scale_pref + small_atom_data["natoms"][:, :2] = small_numb_atom + small_atom_data["natoms"][:, 2:] = paddle.bincount( + small_atom_data["atype"][0], + minlength=small_atom_data["natoms"].shape[1] - 2, + ) + sampled = [sampled[0], small_atom_data] + + # get model + model = get_model(self.config["model"]).to(env.DEVICE) + atomic_model = model.atomic_model + atomic_model["out_bias"] = paddle.randn(atomic_model["out_bias"].shape) + energy_bias_before = to_numpy_array(atomic_model["out_bias"])[0] + + # prepare original model for test + dp = paddle.jit.to_static(model) + tmp_model = tempfile.NamedTemporaryFile(delete=False, suffix=".pd") + paddle.jit.save(dp, 
tmp_model.name) + dp = DeepEval(tmp_model.name) + origin_type_map = ["O", "H"] + full_type_map = ["O", "H", "B"] + + # change energy bias + model.atomic_model.change_out_bias( + sampled, + bias_adjust_mode="change-by-statistic", + ) + energy_bias_after = to_numpy_array(atomic_model["out_bias"])[0] + + # get ground-truth energy bias change + sorter = np.argsort(full_type_map) + idx_type_map = sorter[ + np.searchsorted(full_type_map, origin_type_map, sorter=sorter) + ] + ntest = 1 + atom_nums = np.tile( + np.bincount(to_numpy_array(sampled[0]["atype"][0]))[idx_type_map], + (ntest, 1), + ) + atom_nums_small = np.tile( + np.bincount(to_numpy_array(sampled[1]["atype"][0]))[idx_type_map], + (ntest, 1), + ) + atom_nums = np.concatenate([atom_nums, atom_nums_small], axis=0) + + energy = dp.eval( + to_numpy_array(sampled[0]["coord"][:ntest]), + to_numpy_array(sampled[0]["box"][:ntest]), + to_numpy_array(sampled[0]["atype"][0]), + )[0] + energy_small = dp.eval( + to_numpy_array(sampled[1]["coord"][:ntest]), + to_numpy_array(sampled[1]["box"][:ntest]), + to_numpy_array(sampled[1]["atype"][0]), + )[0] + energy_diff = to_numpy_array(sampled[0][self.testkey][:ntest]) - energy + energy_diff_small = ( + to_numpy_array(sampled[1][self.testkey][:ntest]) - energy_small + ) + energy_diff = np.concatenate([energy_diff, energy_diff_small], axis=0) + finetune_shift = ( + energy_bias_after[idx_type_map] - energy_bias_before[idx_type_map] + ).ravel() + ground_truth_shift = np.linalg.lstsq(atom_nums, energy_diff, rcond=None)[ + 0 + ].reshape(-1) + + # check values + np.testing.assert_almost_equal(finetune_shift, ground_truth_shift, decimal=10) + + self.tearDown() + + def test_finetune_change_type(self): + if not self.mixed_types: + # skip when not mixed_types + return + # get data + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + sampled = make_stat_input( + data.systems, + 
data.dataloaders, + nbatches=1, + ) + data_type_map = self.config["model"]["type_map"] + for [old_type_map, new_type_map] in [ + [["H", "X1", "X2", "O", "B"], ["O", "H", "B"]], + [["O", "H", "B"], ["H", "X1", "X2", "O", "B"]], + ]: + old_type_map_index = np.array( + [old_type_map.index(i) for i in data_type_map], dtype=np.int32 + ) + new_type_map_index = np.array( + [new_type_map.index(i) for i in data_type_map], dtype=np.int32 + ) + + # get pretrained model with old type map + config_old_type_map = deepcopy(self.config) + config_old_type_map["model"]["type_map"] = old_type_map + trainer = get_trainer(config_old_type_map) + trainer.run() + finetune_model = ( + config_old_type_map["training"].get("save_ckpt", "model.ckpt") + ".pd" + ) + + # finetune load the same type_map + config_old_type_map_finetune = deepcopy(self.config) + config_old_type_map_finetune["model"]["type_map"] = old_type_map + config_old_type_map_finetune["model"], finetune_links = get_finetune_rules( + finetune_model, + config_old_type_map_finetune["model"], + ) + trainer_finetune_old = get_trainer( + config_old_type_map_finetune, + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # finetune load the slim type_map + config_new_type_map_finetune = deepcopy(self.config) + config_new_type_map_finetune["model"]["type_map"] = new_type_map + config_new_type_map_finetune["model"], finetune_links = get_finetune_rules( + finetune_model, + config_new_type_map_finetune["model"], + ) + trainer_finetune_new = get_trainer( + config_new_type_map_finetune, + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # test consistency + ntest = 1 + prec = 1e-10 + model_old_result = trainer_finetune_old.model( + sampled[0]["coord"][:ntest], + to_paddle_tensor(old_type_map_index)[sampled[0]["atype"][:ntest]], + box=sampled[0]["box"][:ntest], + ) + model_new_result = trainer_finetune_new.model( + sampled[0]["coord"][:ntest], + 
to_paddle_tensor(new_type_map_index)[sampled[0]["atype"][:ntest]], + box=sampled[0]["box"][:ntest], + ) + test_keys = ["energy", "force", "virial"] + for key in test_keys: + np.testing.assert_allclose( + model_old_result[key].numpy(), + model_new_result[key].numpy(), + rtol=prec, + atol=prec, + ) + + self.tearDown() + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSeA(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False + self.testkey = None + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyZBLModelSeA(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_zbl) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False + self.testkey = None + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyDOSModelSeA(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = 
str(Path(__file__).parent / "dos/input.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "dos/data/global_system")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dos) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False + self.testkey = "dos" + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = True + self.testkey = None + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["model"]["descriptor"]["repformer"]["nlayers"] = 2 + + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = True + self.testkey = None + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_loss.py b/source/tests/pd/test_loss.py 
new file mode 100644 index 0000000000..a7b8109e10 --- /dev/null +++ b/source/tests/pd/test_loss.py @@ -0,0 +1,585 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() +from pathlib import ( + Path, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.tf.loss.ener import ( + EnerStdLoss, +) +from deepmd.utils.data import ( + DataRequirementItem, +) + +from ..seed import ( + GLOBAL_SEED, +) +from .model.test_embedding_net import ( + get_single_batch, +) +from .test_finetune import ( + energy_data_requirement, +) + +CUR_DIR = os.path.dirname(__file__) + + +def get_batch(system, type_map, data_requirement): + dataset = DeepmdDataSetForLoader(system, type_map) + dataset.add_data_requirement(data_requirement) + np_batch, pd_batch = get_single_batch(dataset) + return np_batch, pd_batch + + +class LossCommonTest(unittest.TestCase): + def setUp(self): + self.cur_lr = 1.2 + if not self.spin: + self.system = str(Path(__file__).parent / "water/data/data_0") + self.type_map = ["H", "O"] + else: + self.system = str(Path(__file__).parent / "NiO/data/data_0") + self.type_map = ["Ni", "O"] + energy_data_requirement.append( + DataRequirementItem( + "force_mag", + ndof=3, + atomic=True, + must=False, + high_prec=False, + ) + ) + # data + np_batch, pd_batch = get_batch( + self.system, self.type_map, energy_data_requirement + ) + natoms = np_batch["natoms"] + self.nloc = natoms[0] + nframes = np_batch["energy"].shape[0] + rng = np.random.default_rng(GLOBAL_SEED) + + if not self.spin: + l_energy, l_force, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["virial"], + ) + p_energy, p_force, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force), + np.ones_like(l_virial), + ) + nloc = natoms[0] + batch_size = pd_batch["coord"].shape[0] + p_atom_energy = 
rng.random(size=[batch_size, nloc]) + l_atom_energy = rng.random(size=[batch_size, nloc]) + atom_pref = rng.random(size=[batch_size, nloc * 3]) + drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_ener_coeff = rng.random(size=[batch_size, nloc]) + # placeholders + l_force_real = l_force + l_force_mag = l_force + p_force_real = p_force + p_force_mag = p_force + else: + # data + np_batch, pd_batch = get_batch( + self.system, self.type_map, energy_data_requirement + ) + natoms = np_batch["natoms"] + self.nloc = natoms[0] + l_energy, l_force_real, l_force_mag, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["force_mag"], + np_batch["virial"], + ) + # merged force for tf old implement + l_force_merge_tf = np.concatenate( + [ + l_force_real.reshape([nframes, self.nloc, 3]), + l_force_mag.reshape([nframes, self.nloc, 3])[ + np_batch["atype"] == 0 + ].reshape([nframes, -1, 3]), + ], + axis=1, + ).reshape([nframes, -1]) + p_energy, p_force_real, p_force_mag, p_force_merge_tf, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force_real), + np.ones_like(l_force_mag), + np.ones_like(l_force_merge_tf), + np.ones_like(l_virial), + ) + virt_nloc = (np_batch["atype"] == 0).sum(-1) + natoms_tf = np.concatenate([natoms, virt_nloc], axis=0) + natoms_tf[:2] += virt_nloc + nloc = natoms_tf[0] + batch_size = pd_batch["coord"].shape[0] + p_atom_energy = rng.random(size=[batch_size, nloc]) + l_atom_energy = rng.random(size=[batch_size, nloc]) + atom_pref = rng.random(size=[batch_size, nloc * 3]) + drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_ener_coeff = rng.random(size=[batch_size, nloc]) + self.nloc_tf = nloc + natoms = natoms_tf + l_force = l_force_merge_tf + p_force = p_force_merge_tf + + # tf + self.g = tf.Graph() + with self.g.as_default(): + t_cur_lr = tf.placeholder(shape=[], dtype=tf.float64) + t_natoms = tf.placeholder(shape=[None], dtype=tf.int32) + t_penergy = tf.placeholder(shape=[None, 1], dtype=tf.float64) + t_pforce = 
tf.placeholder(shape=[None, None], dtype=tf.float64) + t_pvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) + t_patom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_lenergy = tf.placeholder(shape=[None, 1], dtype=tf.float64) + t_lforce = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_lvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) + t_latom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_pref = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_ener_coeff = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_drdq = tf.placeholder(shape=[None, None], dtype=tf.float64) + find_energy = tf.constant(1.0, dtype=tf.float64) + find_force = tf.constant(1.0, dtype=tf.float64) + find_virial = tf.constant(1.0 if not self.spin else 0.0, dtype=tf.float64) + find_atom_energy = tf.constant(1.0, dtype=tf.float64) + find_atom_pref = tf.constant(1.0, dtype=tf.float64) + find_drdq = tf.constant(1.0, dtype=tf.float64) + find_atom_ener_coeff = tf.constant(1.0, dtype=tf.float64) + model_dict = { + "energy": t_penergy, + "force": t_pforce, + "virial": t_pvirial, + "atom_ener": t_patom_energy, + } + label_dict = { + "energy": t_lenergy, + "force": t_lforce, + "virial": t_lvirial, + "atom_ener": t_latom_energy, + "atom_pref": t_atom_pref, + "drdq": t_drdq, + "atom_ener_coeff": t_atom_ener_coeff, + "find_energy": find_energy, + "find_force": find_force, + "find_virial": find_virial, + "find_atom_ener": find_atom_energy, + "find_atom_pref": find_atom_pref, + "find_drdq": find_drdq, + "find_atom_ener_coeff": find_atom_ener_coeff, + } + self.tf_loss_sess = self.tf_loss.build( + t_cur_lr, t_natoms, model_dict, label_dict, "" + ) + + self.feed_dict = { + t_cur_lr: self.cur_lr, + t_natoms: natoms, + t_penergy: p_energy, + t_pforce: p_force, + t_pvirial: p_virial.reshape([-1, 9]), + t_patom_energy: p_atom_energy, + t_lenergy: l_energy, + t_lforce: l_force, + t_lvirial: l_virial.reshape([-1, 9]), + 
t_latom_energy: l_atom_energy, + t_atom_pref: atom_pref, + t_drdq: drdq, + t_atom_ener_coeff: atom_ener_coeff, + } + # pd + if not self.spin: + self.model_pred = { + "energy": paddle.to_tensor(p_energy), + "force": paddle.to_tensor(p_force), + "virial": paddle.to_tensor(p_virial), + "atom_energy": paddle.to_tensor(p_atom_energy), + } + self.label = { + "energy": paddle.to_tensor(l_energy), + "find_energy": 1.0, + "force": paddle.to_tensor(l_force), + "find_force": 1.0, + "virial": paddle.to_tensor(l_virial), + "find_virial": 1.0, + "atom_ener": paddle.to_tensor(l_atom_energy), + "find_atom_ener": 1.0, + "atom_pref": paddle.to_tensor(atom_pref), + "find_atom_pref": 1.0, + "drdq": paddle.to_tensor(drdq), + "find_drdq": 1.0, + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + "find_atom_ener_coeff": 1.0, + } + self.label_absent = { + "energy": paddle.to_tensor(l_energy), + "force": paddle.to_tensor(l_force), + "virial": paddle.to_tensor(l_virial), + "atom_ener": paddle.to_tensor(l_atom_energy), + "atom_pref": paddle.to_tensor(atom_pref), + "drdq": paddle.to_tensor(drdq), + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + } + else: + self.model_pred = { + "energy": paddle.to_tensor(p_energy), + "force": paddle.to_tensor(p_force_real).reshape( + [nframes, self.nloc, 3] + ), + "force_mag": paddle.to_tensor(p_force_mag).reshape( + [nframes, self.nloc, 3] + ), + "mask_mag": paddle.to_tensor(np_batch["atype"] == 0).reshape( + [nframes, self.nloc, 1] + ), + "atom_energy": paddle.to_tensor(p_atom_energy), + } + self.label = { + "energy": paddle.to_tensor(l_energy), + "find_energy": 1.0, + "force": paddle.to_tensor(l_force_real).reshape( + [nframes, self.nloc, 3] + ), + "find_force": 1.0, + "force_mag": paddle.to_tensor(l_force_mag).reshape( + [nframes, self.nloc, 3] + ), + "find_force_mag": 1.0, + "atom_ener": paddle.to_tensor(l_atom_energy), + "find_atom_ener": 1.0, + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + "find_atom_ener_coeff": 1.0, + } + 
self.label_absent = { + "energy": paddle.to_tensor(l_energy), + "force": paddle.to_tensor(l_force_real).reshape( + [nframes, self.nloc, 3] + ), + "force_mag": paddle.to_tensor(l_force_mag).reshape( + [nframes, self.nloc, 3] + ), + "atom_ener": paddle.to_tensor(l_atom_energy), + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + } + self.natoms = pd_batch["natoms"] + + def tearDown(self) -> None: + tf.reset_default_graph() + return super().tearDown() + + +class TestEnerStdLoss(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +class 
TestEnerStdLossAePfGf(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + self.start_pref_ae = 0.02 + self.limit_pref_ae = 1.0 + self.start_pref_pf = 0.02 + self.limit_pref_pf = 1.0 + self.start_pref_gf = 0.02 + self.limit_pref_gf = 1.0 + self.numb_generalized_coord = 2 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + self.start_pref_ae, + self.limit_pref_ae, + self.start_pref_pf, + self.limit_pref_pf, + start_pref_gf=self.start_pref_gf, + limit_pref_gf=self.limit_pref_gf, + numb_generalized_coord=self.numb_generalized_coord, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + self.start_pref_ae, + self.limit_pref_ae, + self.start_pref_pf, + self.limit_pref_pf, + start_pref_gf=self.start_pref_gf, + limit_pref_gf=self.limit_pref_gf, + numb_generalized_coord=self.numb_generalized_coord, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial", "atom_ener", "pref_force", 
"gen_force"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +class TestEnerStdLossAecoeff(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + enable_atom_ener_coeff=True, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + enable_atom_ener_coeff=True, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +class TestEnerStdLossRelativeF(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + 
self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + relative_f=0.1, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + relative_f=0.1, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_lr.py b/source/tests/pd/test_lr.py new file mode 100644 index 0000000000..f5ce911b04 --- /dev/null +++ b/source/tests/pd/test_lr.py @@ -0,0 +1,106 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +from deepmd.pd.utils.learning_rate import ( + LearningRateExp, +) +from deepmd.tf.utils import ( + learning_rate, +) + + +class TestLearningRate(unittest.TestCase): + def setUp(self): + self.start_lr = 0.001 + self.stop_lr = 3.51e-8 + 
self.decay_steps = np.arange(400, 601, 100) + self.stop_steps = np.arange(500, 1600, 500) + + def test_consistency(self): + for decay_step in self.decay_steps: + for stop_step in self.stop_steps: + self.decay_step = decay_step + self.stop_step = stop_step + self.judge_it() + self.decay_rate_pt() + + def judge_it(self): + base_lr = learning_rate.LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step + ) + g = tf.Graph() + with g.as_default(): + global_step = tf.placeholder(shape=[], dtype=tf.int32) + t_lr = base_lr.build(global_step, self.stop_step) + + my_lr = LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step, self.stop_step + ) + with tf.Session(graph=g) as sess: + base_vals = [ + sess.run(t_lr, feed_dict={global_step: step_id}) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals = [ + my_lr.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + self.assertTrue(np.allclose(base_vals, my_vals)) + tf.reset_default_graph() + + def decay_rate_pt(self): + my_lr = LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step, self.stop_step + ) + + default_ds = 100 if self.stop_step // 10 > 100 else self.stop_step // 100 + 1 + if self.decay_step >= self.stop_step: + self.decay_step = default_ds + decay_rate = np.exp( + np.log(self.stop_lr / self.start_lr) / (self.stop_step / self.decay_step) + ) + my_lr_decay = LearningRateExp( + self.start_lr, + 1e-10, + self.decay_step, + self.stop_step, + decay_rate=decay_rate, + ) + min_lr = 1e-5 + my_lr_decay_trunc = LearningRateExp( + self.start_lr, + min_lr, + self.decay_step, + self.stop_step, + decay_rate=decay_rate, + ) + my_vals = [ + my_lr.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals_decay = [ + my_lr_decay.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals_decay_trunc = [ + my_lr_decay_trunc.value(step_id) + for 
step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + self.assertTrue(np.allclose(my_vals_decay, my_vals)) + self.assertTrue( + np.allclose(my_vals_decay_trunc, np.clip(my_vals, a_min=min_lr, a_max=None)) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_neighbor_stat.py b/source/tests/pd/test_neighbor_stat.py new file mode 100644 index 0000000000..613150b7fc --- /dev/null +++ b/source/tests/pd/test_neighbor_stat.py @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import shutil +import unittest + +import dpdata +import numpy as np + +from deepmd.entrypoints.neighbor_stat import ( + neighbor_stat, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +def gen_sys(nframes): + rng = np.random.default_rng(GLOBAL_SEED) + natoms = 1000 + data = {} + X, Y, Z = np.mgrid[0:2:3j, 0:2:3j, 0:2:3j] + positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T # + 0.1 + data["coords"] = np.repeat(positions[np.newaxis, :, :], nframes, axis=0) + data["forces"] = rng.random([nframes, natoms, 3]) + data["cells"] = np.array([3.0, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 3.0]).reshape( + 1, 3, 3 + ) + data["energies"] = rng.random([nframes, 1]) + data["atom_names"] = ["TYPE"] + data["atom_numbs"] = [27] + data["atom_types"] = np.repeat(0, 27) + return data + + +class TestNeighborStat(unittest.TestCase): + def setUp(self): + data0 = gen_sys(1) + sys0 = dpdata.LabeledSystem() + sys0.data = data0 + sys0.to_deepmd_npy("system_0", set_size=1) + + def tearDown(self): + shutil.rmtree("system_0") + + def test_neighbor_stat(self): + for rcut in (0.0, 1.0, 2.0, 4.0): + for mixed_type in (True, False): + with self.subTest(rcut=rcut, mixed_type=mixed_type): + rcut += 1e-3 # prevent numerical errors + min_nbor_dist, max_nbor_size = neighbor_stat( + system="system_0", + rcut=rcut, + type_map=["TYPE", "NO_THIS_TYPE"], + mixed_type=mixed_type, + backend="paddle", + ) + upper = np.ceil(rcut) + 1 + X, Y, Z = np.mgrid[-upper:upper, 
-upper:upper, -upper:upper] + positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T + # distance to (0,0,0) + distance = np.linalg.norm(positions, axis=1) + expected_neighbors = np.count_nonzero( + np.logical_and(distance > 0, distance <= rcut) + ) + self.assertAlmostEqual(min_nbor_dist, 1.0, 6) + ret = [expected_neighbors] + if not mixed_type: + ret.append(0) + np.testing.assert_array_equal(max_nbor_size, ret) diff --git a/source/tests/pd/test_sampler.py b/source/tests/pd/test_sampler.py new file mode 100644 index 0000000000..2af5a9c05c --- /dev/null +++ b/source/tests/pd/test_sampler.py @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle +from paddle.io import ( + BatchSampler, + DataLoader, +) + +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, + get_weighted_sampler, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.utils import random as tf_random +from deepmd.tf.utils.data_system import ( + DeepmdDataSystem, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestSampler(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.batch_size = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + self.my_dataset = DpLoaderSet( + self.systems, + self.batch_size, + 
model_config["type_map"], + seed=10, + shuffle=False, + ) + + tf_random.seed(10) + self.dp_dataset = DeepmdDataSystem(self.systems, self.batch_size, 1, self.rcut) + + def test_sampler_debug_info(self): + dataloader = DataLoader( + self.my_dataset, + batch_sampler=BatchSampler( + get_weighted_sampler(self.my_dataset, prob_style="prob_sys_size"), + drop_last=False, + ), + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + # pin_memory=True, + ) + device = paddle.get_device() + paddle.set_device("cpu") + batch_data = next(iter(dataloader)) + paddle.set_device(device) + sid = batch_data["sid"] + fid = batch_data["fid"][0] + coord = batch_data["coord"].squeeze(0) + frame = self.my_dataset.systems[sid].__getitem__(fid) + self.assertTrue(np.allclose(coord, frame["coord"])) + + def test_auto_prob_uniform(self): + auto_prob_style = "prob_uniform" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_auto_prob_sys_size(self): + auto_prob_style = "prob_sys_size" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_auto_prob_sys_size_ext(self): + auto_prob_style = "prob_sys_size;0:1:0.2;1:3:0.8" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_sys_probs(self): + sys_probs = [0.1, 0.4, 0.5] + sampler = get_weighted_sampler( + 
self.my_dataset, prob_style=sys_probs, sys_prob=True + ) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(sys_probs=sys_probs) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_update_sel.py b/source/tests/pd/test_update_sel.py new file mode 100644 index 0000000000..e7b1acf6ff --- /dev/null +++ b/source/tests/pd/test_update_sel.py @@ -0,0 +1,194 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import random +import unittest +from unittest.mock import ( + patch, +) + +from deepmd.pd.model.model.model import ( + BaseModel, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +def update_sel(jdata): + type_map = jdata["model"].get("type_map") + train_data = None + jdata["model"], _ = BaseModel.update_sel(train_data, type_map, jdata["model"]) + return jdata + + +class TestTrain(unittest.TestCase): + def setUp(self) -> None: + self.update_sel = UpdateSel() + self.mock_min_nbor_dist = random.Random(GLOBAL_SEED).random() + return super().setUp() + + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_one_sel(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] + + min_nbor_dist, sel = self.update_sel.update_one_sel(None, None, 6, "auto") + # self.assertEqual(descriptor['sel'], [11,22]) + self.assertEqual(sel, [12, 24]) + self.assertAlmostEqual(min_nbor_dist, self.mock_min_nbor_dist) + min_nbor_dist, sel = self.update_sel.update_one_sel(None, None, 6, "auto:1.5") + # self.assertEqual(descriptor['sel'], [15,30]) + self.assertEqual(sel, [16, 32]) + self.assertAlmostEqual(min_nbor_dist, self.mock_min_nbor_dist) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_hybrid(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, 
[10, 20] + + jdata = { + "model": { + "descriptor": { + "type": "hybrid", + "list": [ + {"type": "se_e2_a", "rcut": 6, "sel": "auto"}, + {"type": "se_e2_a", "rcut": 6, "sel": "auto:1.5"}, + ], + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "hybrid", + "list": [ + {"type": "se_e2_a", "rcut": 6, "sel": [12, 24]}, + {"type": "se_e2_a", "rcut": 6, "sel": [16, 32]}, + ], + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] + + jdata = { + "model": {"descriptor": {"type": "se_e2_a", "rcut": 6, "sel": "auto"}}, + "training": {"training_data": {}}, + } + expected_out = { + "model": {"descriptor": {"type": "se_e2_a", "rcut": 6, "sel": [12, 24]}}, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_atten_auto(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": "auto", + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 28, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_atten_int(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + 
expected_out = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_atten_list(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + def test_skip_frozen(self): + jdata = { + "model": { + "type": "frozen", + }, + "training": {"training_data": {}}, + } + expected_out = jdata.copy() + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + def test_wrap_up_4(self): + self.assertEqual(self.update_sel.wrap_up_4(12), 3 * 4) + self.assertEqual(self.update_sel.wrap_up_4(13), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(14), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(15), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(16), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(17), 5 * 4) diff --git a/source/tests/pd/test_utils.py b/source/tests/pd/test_utils.py new file mode 100644 index 0000000000..8d25cff964 --- /dev/null +++ b/source/tests/pd/test_utils.py @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +class TestCvt(unittest.TestCase): + def test_to_numpy(self): + rng = np.random.default_rng(GLOBAL_SEED) + foo = rng.normal([3, 4]) + for ptp, npp in zip( + [paddle.float16, paddle.float32, 
paddle.float64], + [np.float16, np.float32, np.float64], + ): + foo = foo.astype(npp) + bar = to_paddle_tensor(foo) + self.assertEqual(bar.dtype, ptp) + onk = to_numpy_array(bar) + self.assertEqual(onk.dtype, npp) + with self.assertRaises(ValueError) as ee: + foo = foo.astype(np.int8) + bar = to_paddle_tensor(foo) + with self.assertRaises(ValueError) as ee: + bar = to_paddle_tensor(foo) + bar = to_numpy_array(bar.int()) diff --git a/source/tests/pd/water b/source/tests/pd/water new file mode 120000 index 0000000000..9e74b75a82 --- /dev/null +++ b/source/tests/pd/water @@ -0,0 +1 @@ +model/water/ \ No newline at end of file From 72c9b4ef02f6cf804cec54d23ce9951e72bcc219 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 3 Nov 2024 00:45:41 +0800 Subject: [PATCH 05/58] fix pt->pd --- source/tests/pd/model/test_descriptor.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/source/tests/pd/model/test_descriptor.py b/source/tests/pd/model/test_descriptor.py index 386c68595b..0239b0a03f 100644 --- a/source/tests/pd/model/test_descriptor.py +++ b/source/tests/pd/model/test_descriptor.py @@ -119,7 +119,7 @@ def setUp(self): model_config["type_map"], ) ds.add_data_requirement(energy_data_requirement) - self.np_batch, self.pt_batch = get_single_batch(ds) + self.np_batch, self.pd_batch = get_single_batch(ds) self.sec = np.cumsum(self.sel) self.ntypes = len(self.sel) self.nnei = sum(self.sel) @@ -142,8 +142,8 @@ def test_consistency(self): stddev=std_ones.detach().cpu(), ) - pt_coord = self.pt_batch["coord"].to(env.DEVICE) - atype = self.pt_batch["atype"].to(env.DEVICE) + pt_coord = self.pd_batch["coord"].to(env.DEVICE) + atype = self.pd_batch["atype"].to(env.DEVICE) pt_coord.stop_gradient = False ( extended_coord, @@ -152,11 +152,11 @@ def test_consistency(self): nlist, ) = extend_input_and_build_neighbor_list( pt_coord, - self.pt_batch["atype"].to(env.DEVICE), + self.pd_batch["atype"].to(env.DEVICE), self.rcut, self.sel, 
mixed_types=False, - box=self.pt_batch["box"].to(env.DEVICE), + box=self.pd_batch["box"].to(env.DEVICE), ) my_d, _, _ = prod_env_mat( extended_coord, From 3b1c3489c5b4f4f1a9aa043f19782b12fd44e811 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 3 Nov 2024 00:53:39 +0800 Subject: [PATCH 06/58] update test_python.yml --- .github/workflows/test_python.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index e46bddd98a..033e4c6ba3 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -29,6 +29,7 @@ jobs: source/install/uv_with_retry.sh pip install --system torch -i https://download.pytorch.org/whl/cpu export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') source/install/uv_with_retry.sh pip install --system --only-binary=horovod -e .[cpu,test,jax] horovod[tensorflow-cpu] mpi4py + source/install/uv_with_retry.sh pip install --system --pre "paddlepaddle" -i https://www.paddlepaddle.org.cn/packages/nightly/cpu/ env: # Please note that uv has some issues with finding # existing TensorFlow package. 
Currently, it uses From a46dcb53b02beea6d5cd6572a3aed2f62d58657f Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 3 Nov 2024 13:14:06 +0800 Subject: [PATCH 07/58] restore .pre-commit-config.yaml --- .pre-commit-config.yaml | 52 ++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a22ac7442..6cb534fd22 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - # - repo: https://github.com/pre-commit/mirrors-prettier - # rev: v4.0.0-alpha.8 - # hooks: - # - id: prettier - # types_or: [markdown, yaml, css] - # # workflow files cannot be modified by pre-commit.ci - # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v4.0.0-alpha.8 + hooks: + - id: prettier + types_or: [markdown, yaml, css] + # workflow files cannot be modified by pre-commit.ci + exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - # - repo: https://github.com/njzjz/mirrors-bibtex-tidy - # rev: v1.13.0 - # hooks: - # - id: bibtex-tidy - # args: - # - --curly - # - --numeric - # - --align=13 - # - --blank-lines - # # disable sort: the order of keys and fields has explict meanings - # #- --sort=key - # - --duplicates=key,doi,citation,abstract - # - --merge=combine - # #- --sort-fields - # #- --strip-comments - # - --trailing-commas - # - --encode-urls - # - --remove-empty-fields - # - --wrap=80 + - repo: https://github.com/njzjz/mirrors-bibtex-tidy + rev: v1.13.0 + hooks: + - id: bibtex-tidy + args: + - --curly + - --numeric + - --align=13 + - --blank-lines + # disable sort: the order 
of keys and fields has explict meanings + #- --sort=key + - --duplicates=key,doi,citation,abstract + - --merge=combine + #- --sort-fields + #- --strip-comments + - --trailing-commas + - --encode-urls + - --remove-empty-fields + - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 From 90f9ff9c75edff231be36e3913c19fe780c5cd21 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 3 Nov 2024 15:57:56 +0800 Subject: [PATCH 08/58] remove redundant file --- deepmd/pd/utils/no_use_init.py | 515 --------------------------------- 1 file changed, 515 deletions(-) delete mode 100644 deepmd/pd/utils/no_use_init.py diff --git a/deepmd/pd/utils/no_use_init.py b/deepmd/pd/utils/no_use_init.py deleted file mode 100644 index 9f363d6db0..0000000000 --- a/deepmd/pd/utils/no_use_init.py +++ /dev/null @@ -1,515 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -""" -The initialization method under this module is aligned with pytorch initialization. -If you need to use the initialization method of PaddlePaddle, please refer to -[paddle.nn.initializer](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/nn/initializer). - -This code is based on [torch.nn.init](https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py) -Ths copyright of pytorch/pytorch is a BSD-style license, as found in the LICENSE file. 
-""" - -from __future__ import ( - annotations, -) - -import math -import warnings - -import numpy as np -import paddle -from paddle import ( - nn, -) -from typing_extensions import ( - Literal, -) - -__all__ = [ - "uniform_", - "normal_", - "trunc_normal_", - "glorot_normal_", - "constant_", - "ones_", - "zeros_", - "xavier_uniform_", - "xavier_normal_", - "kaiming_uniform_", - "kaiming_normal_", - "linear_init_", - "conv_init_", -] - - -def _no_grad_uniform_(tensor, a, b): - with paddle.no_grad(): - tensor.set_value( - paddle.uniform(shape=tensor.shape, dtype=tensor.dtype, min=a, max=b) - ) - return tensor - - -def _no_grad_normal_(tensor, mean=0.0, std=1.0): - with paddle.no_grad(): - tensor.set_value(paddle.normal(mean=mean, std=std, shape=tensor.shape)) - return tensor - - -def _no_grad_trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): - # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn( - f"mean({mean}) is more than 2 std({std}) from [a, b]([{a}, {b}]) in _no_grad_trunc_normal_. " - "The distribution of values may be incorrect." - ) - - with paddle.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - l = norm_cdf((a - mean) / std) - u = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. 
- _tensor = paddle.uniform( - shape=tensor.shape, dtype=tensor.dtype, min=2 * l - 1, max=2 * u - 1 - ) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - _tensor.erfinv_() - - # Transform to proper mean, std - _tensor = paddle.multiply( - _tensor, paddle.to_tensor(std * math.sqrt(2.0), tensor.dtype) - ) - _tensor = paddle.add(_tensor, paddle.to_tensor(mean, tensor.dtype)) - - # Clamp to ensure it"s in the proper range - _tensor = paddle.clip(_tensor, min=a, max=b) - tensor.set_value(_tensor) - return tensor - - -def _no_grad_fill_(tensor, value=0.0): - with paddle.no_grad(): - tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype)) - return tensor - - -def uniform_(tensor: paddle.Tensor, a: float, b: float) -> paddle.Tensor: - """Modify tensor inplace using uniform_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - a (float): Min value. - b (float): Max value. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.uniform_(param, -1, 1) - """ - return _no_grad_uniform_(tensor, a, b) - - -def normal_( - tensor: paddle.Tensor, mean: float = 0.0, std: float = 1.0 -) -> paddle.Tensor: - """Modify tensor inplace using normal_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - mean (float, optional): Mean value. Defaults to 0.0. - std (float, optional): Std value. Defaults to 1.0. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.normal_(param, 0, 1) - """ - return _no_grad_normal_(tensor, mean, std) - - -def trunc_normal_( - tensor: paddle.Tensor, - mean: float = 0.0, - std: float = 1.0, - a: float = -2.0, - b: float = 2.0, -) -> paddle.Tensor: - """Modify tensor inplace using trunc_normal_. 
- - Args: - tensor (paddle.Tensor): Paddle Tensor. - mean (float, optional): The mean of the normal distribution. Defaults to 0.0. - std (float, optional): The standard deviation of the normal distribution. Defaults to 1.0. - a (float, optional): The minimum cutoff value. Defaults to -2.0. - b (float, optional): The maximum cutoff value. Defaults to 2.0. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.trunc_normal_(param, 0.0, 1.0) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) - - -def constant_(tensor: paddle.Tensor, value: float = 0.0) -> paddle.Tensor: - """Modify tensor inplace using constant_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - value (float, optional): Value to fill tensor. Defaults to 0.0. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.constant_(param, 2) - """ - return _no_grad_fill_(tensor, value) - - -def ones_(tensor: paddle.Tensor) -> paddle.Tensor: - """Modify tensor inplace using ones_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.ones_(param) - """ - return _no_grad_fill_(tensor, 1) - - -def zeros_(tensor: paddle.Tensor) -> paddle.Tensor: - """Modify tensor inplace using zeros_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - - Returns - ------- - paddle.Tensor: Initialized tensor. 
- - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.zeros_(param) - """ - return _no_grad_fill_(tensor, 0) - - -def _calculate_fan_in_and_fan_out(tensor, reverse=False): - """ - Calculate (fan_in, _fan_out) for tensor. - - Args: - tensor (paddle.Tensor): paddle.Tensor. - reverse (bool): Tensor data format order, False by default as [fout, fin, ...]. - e.g. : conv.weight [cout, cin, kh, kw] is False; linear.weight [cin, cout] - is True. - - Return: - Tuple[float, float]: (fan_in, fan_out). - """ - if tensor.ndim < 2: - raise ValueError( - f"tensor.ndim should be no less than 2, but got {tensor.ndim}." - ) - - if reverse: - num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1] - else: - num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0] - - receptive_field_size = 1 - if tensor.ndim > 2: - receptive_field_size = np.prod(tensor.shape[2:]) - - fan_in = num_input_fmaps * receptive_field_size - fan_out = num_output_fmaps * receptive_field_size - - return fan_in, fan_out - - -def xavier_uniform_( - tensor: paddle.Tensor, gain: float = 1.0, reverse: bool = False -) -> paddle.Tensor: - """Modify tensor inplace using xavier_uniform_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - gain (float, optional): Hyperparameter. Defaults to 1.0. - reverse (bool, optional): Tensor data format order, False by default as - [fout, fin, ...].. Defaults to False. - - Returns - ------- - paddle.Tensor: Initialized tensor. 
- - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.xavier_uniform_(param) - """ - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) - std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) - k = math.sqrt(3.0) * std - return _no_grad_uniform_(tensor, -k, k) - - -def xavier_normal_( - tensor: paddle.Tensor, gain: float = 1.0, reverse: bool = False -) -> paddle.Tensor: - """Modify tensor inplace using xavier_normal_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - gain (float, optional): Hyperparameter. Defaults to 1.0. - reverse (bool, optional): Tensor data format order, False by - default as [fout, fin, ...]. Defaults to False. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.xavier_normal_(param) - """ - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) - std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) - return _no_grad_normal_(tensor, 0, std) - - -# reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html -def _calculate_correct_fan(tensor, mode, reverse=False): - mode = mode.lower() - valid_modes = ["fan_in", "fan_out"] - if mode not in valid_modes: - raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}") - - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse) - - return fan_in if mode == "fan_in" else fan_out - - -def _calculate_gain(nonlinearity, param=None): - linear_fns = [ - "linear", - "conv1d", - "conv2d", - "conv3d", - "conv_transpose1d", - "conv_transpose2d", - "conv_transpose3d", - ] - if nonlinearity in linear_fns or nonlinearity == "sigmoid": - return 1 - elif nonlinearity == "tanh": - return 5.0 / 3 - elif nonlinearity == "relu": - return math.sqrt(2.0) - elif nonlinearity == 
"leaky_relu": - if param is None: - negative_slope = 0.01 - elif ( - not isinstance(param, bool) - and isinstance(param, int) - or isinstance(param, float) - ): - # True/False are instances of int, hence check above - negative_slope = param - else: - raise ValueError(f"negative_slope {param} not a valid number") - return math.sqrt(2.0 / (1 + negative_slope**2)) - elif nonlinearity == "selu": - return 3.0 / 4 - else: - raise ValueError(f"Unsupported nonlinearity {nonlinearity}") - - -def kaiming_uniform_( - tensor: paddle.Tensor, - a: float = 0, - mode: Literal["fan_in", "fan_out"] = "fan_in", - nonlinearity: str = "leaky_relu", - reverse: bool = False, -) -> paddle.Tensor: - """Modify tensor inplace using kaiming_uniform method. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - a (float, optional): The negative slope of the rectifier used after this layer. - Defaults to 0. - mode (Literal["fan_in", "fan_out"], optional): - ["fan_in", "fan_out"]. Defaults to "fan_in". - nonlinearity (str, optional): Nonlinearity method name. Defaults to "leaky_relu". - reverse (bool, optional): Tensor data format order, False by default as - [fout, fin, ...].. Defaults to False. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.kaiming_uniform_(param) - """ - fan = _calculate_correct_fan(tensor, mode, reverse) - gain = _calculate_gain(nonlinearity, a) - std = gain / math.sqrt(fan) - k = math.sqrt(3.0) * std - return _no_grad_uniform_(tensor, -k, k) - - -def kaiming_normal_( - tensor: paddle.Tensor, - a: float = 0, - mode: Literal["fan_in", "fan_out"] = "fan_in", - nonlinearity: str = "leaky_relu", - reverse: bool = False, -) -> paddle.Tensor: - """Modify tensor inplace using kaiming_normal_. - - Args: - tensor (paddle.Tensor): Paddle Tensor. - a (float, optional): The negative slope of the rectifier used after this layer. 
- Defaults to 0. - mode (Literal["fan_in", "fan_out"], optional): Either - 'fan_in' (default) or 'fan_out'. Defaults to "fan_in". - nonlinearity (str, optional): Nonlinearity method name. Defaults to "leaky_relu". - reverse (bool, optional): Tensor data format order. Defaults to False. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.kaiming_normal_(param) - """ - fan = _calculate_correct_fan(tensor, mode, reverse) - gain = _calculate_gain(nonlinearity, a) - std = gain / math.sqrt(fan) - return _no_grad_normal_(tensor, 0, std) - - -def linear_init_(module: nn.Layer) -> None: - """Initialize module's weight and bias as it is a linear layer. - - Args: - module (nn.Layer): Linear Layer to be initialized. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> layer = paddle.nn.Linear(128, 256) - >>> ppsci.utils.initializer.linear_init_(layer) - """ - kaiming_uniform_(module.weight, a=math.sqrt(5)) - if module.bias is not None: - fan_in, _ = _calculate_fan_in_and_fan_out(module.weight, reverse=True) - bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 - uniform_(module.bias, -bound, bound) - - -def conv_init_(module: nn.Layer) -> None: - """Initialize module's weight and bias as it is a conv layer. - - Args: - module (nn.Layer): Convolution Layer to be initialized. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> layer = paddle.nn.Conv2D(4, 16, 2) - >>> ppsci.utils.initializer.conv_init_(layer) - """ - kaiming_uniform_(module.weight, a=math.sqrt(5)) - if module.bias is not None: - fan_in, _ = _calculate_fan_in_and_fan_out(module.weight, reverse=False) - if fan_in != 0: - bound = 1 / math.sqrt(fan_in) - uniform_(module.bias, -bound, bound) - - -def glorot_normal_(tensor: paddle.Tensor) -> paddle.Tensor: - """Modify tensor inplace using jax-style glorot_normal. 
- - Args: - tensor (paddle.Tensor): Paddle Tensor/Paramter. - - Returns - ------- - paddle.Tensor: Initialized tensor. - - Examples - -------- - >>> import paddle - >>> import ppsci - >>> param = paddle.empty((128, 256), "float32") - >>> param = ppsci.utils.initializer.glorot_normal_(param) - """ - assert ( - tensor.ndim == 2 - ), f"glorot_normal_ only support 2D tensor now, but got ndim={tensor.ndim}" - fin, fout = tensor.shape - var = 2.0 / (fin + fout) - stddev = math.sqrt(var) * 0.87962566103423978 - trunc_normal_(tensor) - tensor.set_value(tensor * stddev) - return tensor From 0a6baa6ba7a17bb866f1a28e77d5f98cc26dd600 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 3 Nov 2024 18:16:38 +0800 Subject: [PATCH 09/58] Skip bfloat16 for some cases --- source/tests/consistent/fitting/test_ener.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 580c411b78..6bc5e62bee 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -130,7 +130,9 @@ def skip_pd(self) -> bool: (numb_aparam, use_aparam_as_mask), atom_ener, ) = self.param - return CommonTest.skip_pd + # Paddle do not support "bfloat16" in some kernels, + # so skip this in CI test + return CommonTest.skip_pd or precision == "bfloat16" tf_class = EnerFittingTF dp_class = EnerFittingDP From 4b77e5545bc72d4b7fef9903d13ccca3ed2d74ba Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 3 Nov 2024 18:34:45 +0800 Subject: [PATCH 10/58] enable prim by default in unitest --- source/tests/pd/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/tests/pd/__init__.py b/source/tests/pd/__init__.py index 6ceb116d85..374fca3a03 100644 --- a/source/tests/pd/__init__.py +++ b/source/tests/pd/__init__.py @@ -1 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.pd.utils import ( 
+ env, +) +env.enable_prim(True) From 6e139a29233ce073b6268b2bd8e97b3010febe2e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 3 Nov 2024 10:36:10 +0000 Subject: [PATCH 11/58] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- source/tests/pd/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/source/tests/pd/__init__.py b/source/tests/pd/__init__.py index 374fca3a03..8d1616afb2 100644 --- a/source/tests/pd/__init__.py +++ b/source/tests/pd/__init__.py @@ -2,4 +2,5 @@ from deepmd.pd.utils import ( env, ) + env.enable_prim(True) From 943795795f9d0947c4f4aba688c6bbed7459237e Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 5 Nov 2024 10:48:23 +0800 Subject: [PATCH 12/58] fix env code --- deepmd/pd/utils/env.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 54aa0cf058..3faf6f22e0 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -71,9 +71,9 @@ set_default_nthreads() inter_nthreads, intra_nthreads = get_default_nthreads() # if inter_nthreads > 0: # the behavior of 0 is not documented -# paddle.set_num_interop_threads(inter_nthreads) +# os.environ['OMP_NUM_THREADS'] = str(inter_nthreads) # if intra_nthreads > 0: -# paddle.framework.core.set_num_threads(intra_nthreads) +# os.environ['CPU_NUM'] = str(intra_nthreads) def enable_prim(enable: bool = True): From c22b45d367d843027c8d0d59a67c65570f26a778 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Wed, 6 Nov 2024 09:54:56 +0800 Subject: [PATCH 13/58] update test_ener.py --- source/tests/consistent/model/test_ener.py | 46 +++++++++++++++++++--- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index 171b2ca024..4ce09446b9 100644 --- 
a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -49,7 +49,8 @@ if INSTALLED_PD: from deepmd.pd.model.model import get_model as get_model_pd from deepmd.pd.model.model.ener_model import EnergyModel as EnergyModelPD - + from deepmd.pd.utils.utils import to_numpy_array as paddle_to_numpy + from deepmd.pd.utils.utils import to_paddle_tensor as numpy_to_paddle else: EnergyModelPD = None from deepmd.utils.argcheck import ( @@ -232,6 +233,15 @@ def eval_jax(self, jax_obj: Any) -> Any: self.box, ) + def eval_pd(self, pt_obj: Any) -> Any: + return self.eval_pd_model( + pt_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: @@ -268,11 +278,11 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ) elif backend is self.RefBackend.PD: return ( - ret["energy"].ravel(), - ret["atom_energy"].ravel(), - ret["force"].ravel(), - ret["virial"].ravel(), - ret["atom_virial"].ravel(), + ret["energy"].flatten(), + ret["atom_energy"].flatten(), + ret["force"].flatten(), + ret["virial"].flatten(), + ret["atom_virial"].flatten(), ) raise ValueError(f"Unknown backend: {backend}") @@ -338,6 +348,8 @@ def get_reference_backend(self): return self.RefBackend.JAX if not self.skip_dp: return self.RefBackend.DP + if not self.skip_pd: + return self.RefBackend.PD raise ValueError("No available reference") @property @@ -358,6 +370,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_pt(data) elif cls is EnergyModelJAX: return get_model_jax(data) + elif cls is EnergyModelPD: + return get_model_pd(data) return cls(**data, **self.additional_data) def setUp(self): @@ -452,6 +466,18 @@ def eval_jax(self, jax_obj: Any) -> Any: ).items() } + def eval_pd(self, pd_obj: Any) -> Any: + return { + kk: paddle_to_numpy(vv) + for kk, vv in pd_obj.forward_lower( + 
numpy_to_paddle(self.extended_coord), + numpy_to_paddle(self.extended_atype), + numpy_to_paddle(self.nlist), + numpy_to_paddle(self.mapping), + do_atomic_virial=True, + ).items() + } + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: @@ -478,4 +504,12 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret["energy_derv_c_redu"].ravel(), ret["energy_derv_c"].ravel(), ) + elif backend is self.RefBackend.PD: + return ( + ret["energy"].flatten(), + ret["atom_energy"].flatten(), + ret["extended_force"].flatten(), + ret["virial"].flatten(), + ret["extended_virial"].flatten(), + ) raise ValueError(f"Unknown backend: {backend}") From 39842ff01855db58d9fe964ed103ccf1615ff811 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Wed, 6 Nov 2024 11:01:04 +0800 Subject: [PATCH 14/58] add missing pd_class --- source/tests/consistent/model/test_ener.py | 1 + 1 file changed, 1 insertion(+) diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index 4ce09446b9..cb6fb2cf99 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -335,6 +335,7 @@ def data(self) -> dict: dp_class = EnergyModelDP pt_class = EnergyModelPT jax_class = EnergyModelJAX + pd_class = EnergyModelPD args = model_args() def get_reference_backend(self): From 07cd98e3c3354f283ec1e71d79e47b0c35ffb8d5 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Wed, 6 Nov 2024 13:34:54 +0800 Subject: [PATCH 15/58] use paddle Tensor instead of numpy array in pd/test_auto_batch_size.py to coverage newly added code --- source/tests/pd/test_auto_batch_size.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/source/tests/pd/test_auto_batch_size.py b/source/tests/pd/test_auto_batch_size.py index 1033f46d07..966333f47c 100644 --- 
a/source/tests/pd/test_auto_batch_size.py +++ b/source/tests/pd/test_auto_batch_size.py @@ -2,6 +2,7 @@ import unittest import numpy as np +import paddle from deepmd.pd.utils.auto_batch_size import ( AutoBatchSize, @@ -10,28 +11,28 @@ class TestAutoBatchSize(unittest.TestCase): def test_execute_all(self): - dd0 = np.zeros((10000, 2, 1, 3, 4)) - dd1 = np.ones((10000, 2, 1, 3, 4)) + dd0 = paddle.zeros((10000, 2, 1, 3, 4)) + dd1 = paddle.ones((10000, 2, 1, 3, 4)) auto_batch_size = AutoBatchSize(256, 2.0) def func(dd1): - return np.zeros_like(dd1), np.ones_like(dd1) + return paddle.zeros_like(dd1), paddle.ones_like(dd1) dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) - np.testing.assert_equal(dd0, dd2[0]) - np.testing.assert_equal(dd1, dd2[1]) + np.testing.assert_equal(dd0.numpy(), dd2[0].numpy()) + np.testing.assert_equal(dd1.numpy(), dd2[1].numpy()) def test_execute_all_dict(self): - dd0 = np.zeros((10000, 2, 1, 3, 4)) - dd1 = np.ones((10000, 2, 1, 3, 4)) + dd0 = paddle.zeros((10000, 2, 1, 3, 4)) + dd1 = paddle.ones((10000, 2, 1, 3, 4)) auto_batch_size = AutoBatchSize(256, 2.0) def func(dd1): return { - "foo": np.zeros_like(dd1), - "bar": np.ones_like(dd1), + "foo": paddle.zeros_like(dd1), + "bar": paddle.ones_like(dd1), } dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) - np.testing.assert_equal(dd0, dd2["foo"]) - np.testing.assert_equal(dd1, dd2["bar"]) + np.testing.assert_equal(dd0.numpy(), dd2["foo"].numpy()) + np.testing.assert_equal(dd1.numpy(), dd2["bar"].numpy()) From bb2d54709c07da3c6ae0a74033c454603714a73d Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 11:40:45 +0800 Subject: [PATCH 16/58] add training test and remove ase_calc.py --- .pre-commit-config.yaml | 52 +++---- deepmd/pd/utils/ase_calc.py | 6 - source/tests/pd/test_training.py | 176 ++++++++++++++++++++++++ source/tests/pt/test_auto_batch_size.py | 23 ++-- 4 files changed, 214 insertions(+), 43 deletions(-) delete mode 100644 
deepmd/pd/utils/ase_calc.py create mode 100644 source/tests/pd/test_training.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 721a0cd6eb..9e7e3b763e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 - hooks: - - id: prettier - types_or: [markdown, yaml, css] - # workflow files cannot be modified by pre-commit.ci - exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + # - repo: https://github.com/pre-commit/mirrors-prettier + # rev: v4.0.0-alpha.8 + # hooks: + # - id: prettier + # types_or: [markdown, yaml, css] + # # workflow files cannot be modified by pre-commit.ci + # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - - repo: https://github.com/njzjz/mirrors-bibtex-tidy - rev: v1.13.0 - hooks: - - id: bibtex-tidy - args: - - --curly - - --numeric - - --align=13 - - --blank-lines - # disable sort: the order of keys and fields has explict meanings - #- --sort=key - - --duplicates=key,doi,citation,abstract - - --merge=combine - #- --sort-fields - #- --strip-comments - - --trailing-commas - - --encode-urls - - --remove-empty-fields - - --wrap=80 + # - repo: https://github.com/njzjz/mirrors-bibtex-tidy + # rev: v1.13.0 + # hooks: + # - id: bibtex-tidy + # args: + # - --curly + # - --numeric + # - --align=13 + # - --blank-lines + # # disable sort: the order of keys and fields has explict meanings + # #- --sort=key + # - --duplicates=key,doi,citation,abstract + # - --merge=combine + # #- --sort-fields + # #- --strip-comments + # - --trailing-commas + # - --encode-urls + # - --remove-empty-fields + # - --wrap=80 # license header - repo: 
https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/deepmd/pd/utils/ase_calc.py b/deepmd/pd/utils/ase_calc.py deleted file mode 100644 index 6bcb9cdc5e..0000000000 --- a/deepmd/pd/utils/ase_calc.py +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from deepmd.calculator import DP as DPCalculator - -__all__ = [ - "DPCalculator", -] diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py new file mode 100644 index 0000000000..d4e7309a65 --- /dev/null +++ b/source/tests/pd/test_training.py @@ -0,0 +1,176 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) + +from .model.test_permutation import ( + model_se_e2_a, +) + + +class DPTrainTest: + def test_dp_train(self): + # test training from scratch + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + state_dict_trained = trainer.wrapper.model.state_dict() + + # test fine-tuning using same input + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pd" + self.config["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config["model"], + ) + trainer_finetune = get_trainer( + deepcopy(self.config), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # test fine-tuning using empty input + self.config_empty = deepcopy(self.config) + if "descriptor" in self.config_empty["model"]: + self.config_empty["model"]["descriptor"] = {} + if "fitting_net" in self.config_empty["model"]: + self.config_empty["model"]["fitting_net"] = {} + self.config_empty["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config_empty["model"], + change_model_params=True, + ) + trainer_finetune_empty = get_trainer( + 
deepcopy(self.config_empty), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # test fine-tuning using random fitting + self.config["model"], finetune_links = get_finetune_rules( + finetune_model, self.config["model"], model_branch="RANDOM" + ) + trainer_finetune_random = get_trainer( + deepcopy(self.config_empty), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # check parameters + state_dict_finetuned = trainer_finetune.wrapper.model.state_dict() + state_dict_finetuned_empty = trainer_finetune_empty.wrapper.model.state_dict() + state_dict_finetuned_random = trainer_finetune_random.wrapper.model.state_dict() + for state_key in state_dict_finetuned: + if "out_bias" not in state_key and "out_std" not in state_key: + np.testing.assert_allclose( + state_dict_trained[state_key].numpy(), + state_dict_finetuned[state_key].numpy(), + ) + np.testing.assert_allclose( + state_dict_trained[state_key].numpy(), + state_dict_finetuned_empty[state_key].numpy(), + ) + if "fitting_net" not in state_key: + np.testing.assert_allclose( + state_dict_trained[state_key].numpy(), + state_dict_finetuned_random[state_key].numpy(), + ) + + # check running + trainer_finetune.run() + trainer_finetune_empty.run() + trainer_finetune_random.run() + + def test_trainable(self): + fix_params = deepcopy(self.config) + fix_params["model"]["descriptor"]["trainable"] = False + fix_params["model"]["fitting_net"]["trainable"] = False + free_descriptor = hasattr(self, "not_all_grad") and self.not_all_grad + if free_descriptor: + # can not set requires_grad false for all parameters, + # because the input coord has no grad, thus the loss if all set to false + # we only check trainable for fitting net + fix_params["model"]["descriptor"]["trainable"] = True + trainer_fix = get_trainer(fix_params) + model_dict_before_training = deepcopy( + trainer_fix.model.get_fitting_net().state_dict() + ) + trainer_fix.run() + model_dict_after_training = deepcopy( + 
trainer_fix.model.get_fitting_net().state_dict() + ) + else: + trainer_fix = get_trainer(fix_params) + model_dict_before_training = deepcopy(trainer_fix.model.state_dict()) + trainer_fix.run() + model_dict_after_training = deepcopy(trainer_fix.model.state_dict()) + for key in model_dict_before_training: + np.testing.assert_allclose( + model_dict_before_training[key].numpy(), + model_dict_after_training[key].numpy(), + ) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +class TestEnergyModelSeA(unittest.TestCase, DPTrainTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + +class TestFparam(unittest.TestCase, DPTrainTest): + """Test if `fparam` can be loaded correctly.""" + + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["model"]["fitting_net"]["numb_fparam"] = 1 + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.set_path = Path(__file__).parent / "water/data/data_0" / "set.000" + shutil.copyfile(self.set_path / "energy.npy", 
self.set_path / "fparam.npy") + + def tearDown(self) -> None: + (self.set_path / "fparam.npy").unlink(missing_ok=True) + DPTrainTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_auto_batch_size.py b/source/tests/pt/test_auto_batch_size.py index 71194e001e..a318cd7b24 100644 --- a/source/tests/pt/test_auto_batch_size.py +++ b/source/tests/pt/test_auto_batch_size.py @@ -2,6 +2,7 @@ import unittest import numpy as np +import torch from deepmd.pt.utils.auto_batch_size import ( AutoBatchSize, @@ -10,28 +11,28 @@ class TestAutoBatchSize(unittest.TestCase): def test_execute_all(self): - dd0 = np.zeros((10000, 2, 1, 3, 4)) - dd1 = np.ones((10000, 2, 1, 3, 4)) + dd0 = torch.zeros((10000, 2, 1, 3, 4)) + dd1 = torch.ones((10000, 2, 1, 3, 4)) auto_batch_size = AutoBatchSize(256, 2.0) def func(dd1): - return np.zeros_like(dd1), np.ones_like(dd1) + return torch.zeros_like(dd1), torch.ones_like(dd1) dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) - np.testing.assert_equal(dd0, dd2[0]) - np.testing.assert_equal(dd1, dd2[1]) + np.testing.assert_equal(dd0.cpu().numpy(), dd2[0].cpu().numpy()) + np.testing.assert_equal(dd1.cpu().numpy(), dd2[1].cpu().numpy()) def test_execute_all_dict(self): - dd0 = np.zeros((10000, 2, 1, 3, 4)) - dd1 = np.ones((10000, 2, 1, 3, 4)) + dd0 = torch.zeros((10000, 2, 1, 3, 4)) + dd1 = torch.ones((10000, 2, 1, 3, 4)) auto_batch_size = AutoBatchSize(256, 2.0) def func(dd1): return { - "foo": np.zeros_like(dd1), - "bar": np.ones_like(dd1), + "foo": torch.zeros_like(dd1), + "bar": torch.ones_like(dd1), } dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) - np.testing.assert_equal(dd0, dd2["foo"]) - np.testing.assert_equal(dd1, dd2["bar"]) + np.testing.assert_equal(dd0.cpu().numpy(), dd2["foo"].cpu().numpy()) + np.testing.assert_equal(dd1.cpu().numpy(), dd2["bar"].cpu().numpy()) From 5fb6d8ec529d0ddc25024560fa038c809591fd2a Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: 
Thu, 7 Nov 2024 11:41:00 +0800 Subject: [PATCH 17/58] add training test and remove ase_calc.py --- .pre-commit-config.yaml | 52 ++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e7e3b763e..721a0cd6eb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - # - repo: https://github.com/pre-commit/mirrors-prettier - # rev: v4.0.0-alpha.8 - # hooks: - # - id: prettier - # types_or: [markdown, yaml, css] - # # workflow files cannot be modified by pre-commit.ci - # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v4.0.0-alpha.8 + hooks: + - id: prettier + types_or: [markdown, yaml, css] + # workflow files cannot be modified by pre-commit.ci + exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - # - repo: https://github.com/njzjz/mirrors-bibtex-tidy - # rev: v1.13.0 - # hooks: - # - id: bibtex-tidy - # args: - # - --curly - # - --numeric - # - --align=13 - # - --blank-lines - # # disable sort: the order of keys and fields has explict meanings - # #- --sort=key - # - --duplicates=key,doi,citation,abstract - # - --merge=combine - # #- --sort-fields - # #- --strip-comments - # - --trailing-commas - # - --encode-urls - # - --remove-empty-fields - # - --wrap=80 + - repo: https://github.com/njzjz/mirrors-bibtex-tidy + rev: v1.13.0 + hooks: + - id: bibtex-tidy + args: + - --curly + - --numeric + - --align=13 + - --blank-lines + # disable sort: the order of keys and fields has explict meanings + #- --sort=key + - --duplicates=key,doi,citation,abstract + - --merge=combine + #- 
--sort-fields + #- --strip-comments + - --trailing-commas + - --encode-urls + - --remove-empty-fields + - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 From 90c9c03706bbcd2533b9f5f11ea5816e04b26511 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 12:25:22 +0800 Subject: [PATCH 18/58] upload missing json --- source/tests/pd/model/water/se_atten.json | 83 +++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 source/tests/pd/model/water/se_atten.json diff --git a/source/tests/pd/model/water/se_atten.json b/source/tests/pd/model/water/se_atten.json new file mode 100644 index 0000000000..70abf6759c --- /dev/null +++ b/source/tests/pd/model/water/se_atten.json @@ -0,0 +1,83 @@ +{ + "_comment": "that's all", + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [ + 25, + 50, + 100 + ], + "axis_neuron": 16, + "type_one_side": true, + "attn": 64, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": false, + "temperature": 1.0, + "seed": 1 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "numb_btch": 1, + "_comment": "that's all" + }, + 
"numb_steps": 1000000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 1000, + "save_ckpt": "model", + "_comment": "that's all" + } +} From eb7384e57fdc350650c4f99a16d9c398d392bb66 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 13:07:26 +0800 Subject: [PATCH 19/58] restore pt/test_auto_batch_size.py --- source/tests/pt/test_auto_batch_size.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/source/tests/pt/test_auto_batch_size.py b/source/tests/pt/test_auto_batch_size.py index a318cd7b24..71194e001e 100644 --- a/source/tests/pt/test_auto_batch_size.py +++ b/source/tests/pt/test_auto_batch_size.py @@ -2,7 +2,6 @@ import unittest import numpy as np -import torch from deepmd.pt.utils.auto_batch_size import ( AutoBatchSize, @@ -11,28 +10,28 @@ class TestAutoBatchSize(unittest.TestCase): def test_execute_all(self): - dd0 = torch.zeros((10000, 2, 1, 3, 4)) - dd1 = torch.ones((10000, 2, 1, 3, 4)) + dd0 = np.zeros((10000, 2, 1, 3, 4)) + dd1 = np.ones((10000, 2, 1, 3, 4)) auto_batch_size = AutoBatchSize(256, 2.0) def func(dd1): - return torch.zeros_like(dd1), torch.ones_like(dd1) + return np.zeros_like(dd1), np.ones_like(dd1) dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) - np.testing.assert_equal(dd0.cpu().numpy(), dd2[0].cpu().numpy()) - np.testing.assert_equal(dd1.cpu().numpy(), dd2[1].cpu().numpy()) + np.testing.assert_equal(dd0, dd2[0]) + np.testing.assert_equal(dd1, dd2[1]) def test_execute_all_dict(self): - dd0 = torch.zeros((10000, 2, 1, 3, 4)) - dd1 = torch.ones((10000, 2, 1, 3, 4)) + dd0 = np.zeros((10000, 2, 1, 3, 4)) + dd1 = np.ones((10000, 2, 1, 3, 4)) auto_batch_size = AutoBatchSize(256, 2.0) def func(dd1): return { - "foo": torch.zeros_like(dd1), - "bar": torch.ones_like(dd1), + "foo": np.zeros_like(dd1), + "bar": np.ones_like(dd1), } dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) - np.testing.assert_equal(dd0.cpu().numpy(), 
dd2["foo"].cpu().numpy()) - np.testing.assert_equal(dd1.cpu().numpy(), dd2["bar"].cpu().numpy()) + np.testing.assert_equal(dd0, dd2["foo"]) + np.testing.assert_equal(dd1, dd2["bar"]) From 9faf54f3923731c7a1b5586abd0b64f4081ad72d Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 14:02:29 +0800 Subject: [PATCH 20/58] rerun CI for network problem From 4e3a121c24310517b2b1a7f4f0374cca19d8b1c6 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 16:58:30 +0800 Subject: [PATCH 21/58] add multitask unitest --- .pre-commit-config.yaml | 52 ++--- source/tests/pd/model/water/multitask.json | 140 +++++++++++++ source/tests/pd/test_multitask.py | 224 +++++++++++++++++++++ 3 files changed, 390 insertions(+), 26 deletions(-) create mode 100644 source/tests/pd/model/water/multitask.json create mode 100644 source/tests/pd/test_multitask.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 721a0cd6eb..9e7e3b763e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 - hooks: - - id: prettier - types_or: [markdown, yaml, css] - # workflow files cannot be modified by pre-commit.ci - exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + # - repo: https://github.com/pre-commit/mirrors-prettier + # rev: v4.0.0-alpha.8 + # hooks: + # - id: prettier + # types_or: [markdown, yaml, css] + # # workflow files cannot be modified by pre-commit.ci + # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - - repo: https://github.com/njzjz/mirrors-bibtex-tidy - rev: v1.13.0 - hooks: - - id: bibtex-tidy - 
args: - - --curly - - --numeric - - --align=13 - - --blank-lines - # disable sort: the order of keys and fields has explict meanings - #- --sort=key - - --duplicates=key,doi,citation,abstract - - --merge=combine - #- --sort-fields - #- --strip-comments - - --trailing-commas - - --encode-urls - - --remove-empty-fields - - --wrap=80 + # - repo: https://github.com/njzjz/mirrors-bibtex-tidy + # rev: v1.13.0 + # hooks: + # - id: bibtex-tidy + # args: + # - --curly + # - --numeric + # - --align=13 + # - --blank-lines + # # disable sort: the order of keys and fields has explict meanings + # #- --sort=key + # - --duplicates=key,doi,citation,abstract + # - --merge=combine + # #- --sort-fields + # #- --strip-comments + # - --trailing-commas + # - --encode-urls + # - --remove-empty-fields + # - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/source/tests/pd/model/water/multitask.json b/source/tests/pd/model/water/multitask.json new file mode 100644 index 0000000000..83524a8b77 --- /dev/null +++ b/source/tests/pd/model/water/multitask.json @@ -0,0 +1,140 @@ +{ + "model": { + "shared_dict": { + "my_type_map": [ + "O", + "H", + "B" + ], + "my_descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "_comment": "that's all" + }, + "model_dict": { + "model_1": { + "type_map": "my_type_map", + "descriptor": "my_descriptor", + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "data_stat_nbatch": 1 + }, + "model_2": { + "type_map": "my_type_map", + "descriptor": "my_descriptor", + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "data_stat_nbatch": 1 + } + } + }, + "learning_rate": { + "type": "exp", + 
"decay_steps": 5000, + "start_lr": 0.0002, + "decay_rate": 0.98, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss_dict": { + "model_1": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + "model_2": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + } + }, + "training": { + "model_prob": { + "model_1": 0.5, + "model_2": 0.5 + }, + "data_dict": { + "model_1": { + "stat_file": "./stat_files/model_1.hdf5", + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + }, + "model_2": { + "stat_file": "./stat_files/model_2.hdf5", + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + } + }, + "numb_steps": 100000, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 100, + "_comment": "that's all" + } +} diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py new file mode 100644 index 0000000000..e3d4cfa7de --- /dev/null +++ b/source/tests/pd/test_multitask.py @@ -0,0 +1,224 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) +from deepmd.pd.utils.multi_task import ( + preprocess_shared_params, +) +from deepmd.utils.argcheck import ( + 
normalize, +) +from deepmd.utils.compat import ( + update_deepmd_input, +) + +from .model.test_permutation import ( + model_se_e2_a, +) + + +def setUpModule(): + global multitask_template + multitask_template_json = str(Path(__file__).parent / "water/multitask.json") + with open(multitask_template_json) as f: + multitask_template = json.load(f) + + +class MultiTaskTrainTest: + def test_multitask_train(self): + # test multitask training + self.config = update_deepmd_input(self.config, warning=True) + self.config = normalize(self.config, multi_task=True) + trainer = get_trainer(deepcopy(self.config), shared_links=self.shared_links) + trainer.run() + # check model keys + self.assertEqual(len(trainer.wrapper.model), 2) + self.assertIn("model_1", trainer.wrapper.model) + self.assertIn("model_2", trainer.wrapper.model) + + # check shared parameters + multi_state_dict = trainer.wrapper.model.state_dict() + for state_key in multi_state_dict: + if "model_1" in state_key: + self.assertIn(state_key.replace("model_1", "model_2"), multi_state_dict) + if "model_2" in state_key: + self.assertIn(state_key.replace("model_2", "model_1"), multi_state_dict) + if "model_1.descriptor" in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key].numpy(), + multi_state_dict[state_key.replace("model_1", "model_2")].numpy(), + ) + + # test multitask fine-tuning + # add model_3 + self.origin_config["model"]["model_dict"]["model_3"] = deepcopy( + self.origin_config["model"]["model_dict"]["model_2"] + ) + self.origin_config["loss_dict"]["model_3"] = deepcopy( + self.origin_config["loss_dict"]["model_2"] + ) + self.origin_config["training"]["model_prob"]["model_3"] = deepcopy( + self.origin_config["training"]["model_prob"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_3"] = deepcopy( + self.origin_config["training"]["data_dict"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_3"]["stat_file"] = ( + self.origin_config[ + "training" + 
]["data_dict"]["model_3"]["stat_file"].replace("model_2", "model_3") + ) + + # add model_4 + self.origin_config["model"]["model_dict"]["model_4"] = deepcopy( + self.origin_config["model"]["model_dict"]["model_2"] + ) + self.origin_config["loss_dict"]["model_4"] = deepcopy( + self.origin_config["loss_dict"]["model_2"] + ) + self.origin_config["training"]["model_prob"]["model_4"] = deepcopy( + self.origin_config["training"]["model_prob"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_4"] = deepcopy( + self.origin_config["training"]["data_dict"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_4"]["stat_file"] = ( + self.origin_config[ + "training" + ]["data_dict"]["model_4"]["stat_file"].replace("model_2", "model_4") + ) + + # set finetune rules + # model_1 resuming from model_1 + # pass + + # model_2 fine-tuning from model_2 + self.origin_config["model"]["model_dict"]["model_2"]["finetune_head"] = ( + "model_2" + ) + + # new model_3 fine-tuning from model_2 + self.origin_config["model"]["model_dict"]["model_3"]["finetune_head"] = ( + "model_2" + ) + + # new model_4 fine-tuning with randomly initialized fitting net + # pass + + self.origin_config["model"], shared_links_finetune = preprocess_shared_params( + self.origin_config["model"] + ) + + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pd" + self.origin_config["model"], finetune_links = get_finetune_rules( + finetune_model, + self.origin_config["model"], + ) + self.origin_config = update_deepmd_input(self.origin_config, warning=True) + self.origin_config = normalize(self.origin_config, multi_task=True) + trainer_finetune = get_trainer( + deepcopy(self.origin_config), + finetune_model=finetune_model, + shared_links=shared_links_finetune, + finetune_links=finetune_links, + ) + + # check parameters + multi_state_dict_finetuned = trainer_finetune.wrapper.model.state_dict() + for state_key in multi_state_dict_finetuned: + if "model_1" in state_key: + 
np.testing.assert_allclose( + multi_state_dict[state_key].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + elif "model_2" in state_key and "out_bias" not in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + elif "model_3" in state_key and "out_bias" not in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key.replace("model_3", "model_2")].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + elif ( + "model_4" in state_key + and "fitting_net" not in state_key + and "out_bias" not in state_key + ): + np.testing.assert_allclose( + multi_state_dict[state_key.replace("model_4", "model_2")].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + + # check running + trainer_finetune.run() + self.tearDown() + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in [self.stat_files]: + shutil.rmtree(f) + + +class TestMultiTaskSeA(unittest.TestCase, MultiTaskTrainTest): + def setUp(self): + multitask_se_e2_a = deepcopy(multitask_template) + multitask_se_e2_a["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "se_e2_a" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_se_e2_a + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + 
self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() From 18333ab025fa42f5055d41b58a77495fd9440140 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 17:10:41 +0800 Subject: [PATCH 22/58] add more unitest --- .pre-commit-config.yaml | 52 ++--- source/tests/pd/model/test_force_grad.py | 111 +++++++++++ source/tests/pd/model/test_jit.py | 83 ++++++++ source/tests/pd/test_dp_show.py | 231 +++++++++++++++++++++++ source/tests/pd/test_init_model.py | 136 +++++++++++++ 5 files changed, 587 insertions(+), 26 deletions(-) create mode 100644 source/tests/pd/model/test_force_grad.py create mode 100644 source/tests/pd/model/test_jit.py create mode 100644 source/tests/pd/test_dp_show.py create mode 100644 source/tests/pd/test_init_model.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e7e3b763e..721a0cd6eb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - # - repo: https://github.com/pre-commit/mirrors-prettier - # rev: v4.0.0-alpha.8 - # hooks: - # - id: prettier - # types_or: [markdown, yaml, css] - # # workflow files cannot be modified by pre-commit.ci - # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v4.0.0-alpha.8 + hooks: + - id: prettier + types_or: [markdown, yaml, css] + # workflow files cannot be modified by pre-commit.ci + exclude: 
^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - # - repo: https://github.com/njzjz/mirrors-bibtex-tidy - # rev: v1.13.0 - # hooks: - # - id: bibtex-tidy - # args: - # - --curly - # - --numeric - # - --align=13 - # - --blank-lines - # # disable sort: the order of keys and fields has explict meanings - # #- --sort=key - # - --duplicates=key,doi,citation,abstract - # - --merge=combine - # #- --sort-fields - # #- --strip-comments - # - --trailing-commas - # - --encode-urls - # - --remove-empty-fields - # - --wrap=80 + - repo: https://github.com/njzjz/mirrors-bibtex-tidy + rev: v1.13.0 + hooks: + - id: bibtex-tidy + args: + - --curly + - --numeric + - --align=13 + - --blank-lines + # disable sort: the order of keys and fields has explict meanings + #- --sort=key + - --duplicates=key,doi,citation,abstract + - --merge=combine + #- --sort-fields + #- --strip-comments + - --trailing-commas + - --encode-urls + - --remove-empty-fields + - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/source/tests/pd/model/test_force_grad.py b/source/tests/pd/model/test_force_grad.py new file mode 100644 index 0000000000..d7b569ef38 --- /dev/null +++ b/source/tests/pd/model/test_force_grad.py @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.utils.data import ( + DeepmdData, +) + +from ...seed import ( + GLOBAL_SEED, +) + + +class CheckSymmetry(DeepmdData): + def __init__( + self, + sys_path: str, + type_map: Optional[list[str]] = None, + ): + super().__init__(sys_path=sys_path, type_map=type_map) + self.add("energy", 1, 
atomic=False, must=False, high_prec=True) + self.add("force", 3, atomic=True, must=False, high_prec=False) + self.add("virial", 9, atomic=False, must=False, high_prec=False) + + def get_disturb(self, index, atom_index, axis_index, delta): + for i in range( + 0, len(self.dirs) + 1 + ): # note: if different sets can be merged, prefix sum is unused to calculate + if index < self.prefix_sum[i]: + break + frames = self._load_set(self.dirs[i - 1]) + tmp = copy.deepcopy(frames["coord"].reshape(self.nframes, -1, 3)) + tmp[:, atom_index, axis_index] += delta + frames["coord"] = tmp + frame = self._get_subdata(frames, index - self.prefix_sum[i - 1]) + frame = self.reformat_data_torch(frame) + return frame + + +def get_data(batch): + inputs = {} + for key in ["coord", "atype", "box"]: + inputs[key] = batch[key].unsqueeze(0).to(env.DEVICE) + return inputs + + +class TestForceGrad(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.system_index = 0 + self.batch_index = 0 + self.get_dataset(self.system_index, self.batch_index) + self.get_model() + + def get_model(self): + self.model = get_model(self.config["model"]).to(env.DEVICE) + + def get_dataset(self, system_index=0, batch_index=0): + systems = self.config["training"]["training_data"]["systems"] + rcut = self.config["model"]["descriptor"]["rcut"] + sel = self.config["model"]["descriptor"]["sel"] + sec = paddle.cumsum(paddle.to_tensor(sel), axis=0) + type_map = self.config["model"]["type_map"] + self.dpdatasystem = CheckSymmetry( + sys_path=systems[system_index], type_map=type_map + ) + self.origin_batch = self.dpdatasystem.get_item_paddle(batch_index) + + @unittest.skip("it can be replaced by autodiff") + def test_force_grad(self, 
threshold=1e-2, delta0=1e-6, seed=20): + rng = np.random.default_rng(GLOBAL_SEED) + result0 = self.model(**get_data(self.origin_batch)) + np.random.default_rng(seed) + errors = np.zeros((self.dpdatasystem.natoms, 3)) + for atom_index in range(self.dpdatasystem.natoms): + for axis_index in range(3): + delta = rng.random() * delta0 + disturb_batch = self.dpdatasystem.get_disturb( + self.batch_index, atom_index, axis_index, delta + ) + disturb_result = self.model(**get_data(disturb_batch)) + disturb_force = -(disturb_result["energy"] - result0["energy"]) / delta + disturb_error = ( + result0["force"][0, atom_index, axis_index] - disturb_force + ) + errors[atom_index, axis_index] = disturb_error.detach().cpu().numpy() + self.assertTrue(np.abs(errors).max() < threshold, msg=str(np.abs(errors).max())) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_jit.py b/source/tests/pd/model/test_jit.py new file mode 100644 index 0000000000..772a05530f --- /dev/null +++ b/source/tests/pd/model/test_jit.py @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import paddle +from paddle.static import ( + InputSpec, +) + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.infer import ( + inference, +) + +from .test_permutation import ( + model_se_e2_a, +) + + +class JITTest: + def test_jit(self): + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + paddle.set_flags( + { + "FLAGS_save_cf_stack_op": 1, + "FLAGS_prim_enable_dynamic": 1, + "FLAGS_enable_pir_api": 1, + } + ) + model = paddle.jit.to_static( + inference.Tester("./model.pd").model, full_graph=True + ) + paddle.jit.save( + model, + "./frozen_model", + input_spec=[ + InputSpec([-1, -1, 3], dtype="float64"), + InputSpec([-1, -1], dtype="int32"), + InputSpec([-1, -1, -1], dtype="int32"), + ], + ) + + def tearDown(self): 
+ for f in os.listdir("."): + if f.startswith("model") and f.endswith("pt"): + os.remove(f) + if f in ["lcurve.out", "frozen_model.json", "frozen_model.pdiparams"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + if f in ["checkpoint"]: + os.remove(f) + + +class TestEnergyModelSeA(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_dp_show.py b/source/tests/pd/test_dp_show.py new file mode 100644 index 0000000000..351a7f971f --- /dev/null +++ b/source/tests/pd/test_dp_show.py @@ -0,0 +1,231 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import io +import json +import os +import shutil +import unittest +from contextlib import ( + redirect_stderr, +) +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.utils.multi_task import ( + preprocess_shared_params, +) + +from .common import ( + run_dp, +) +from .model.test_permutation import ( + model_se_e2_a, +) + + +class TestSingleTaskModel(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + os.environ["FLAGS_prim_enable_dynamic"] = "1" + os.environ["FLAGS_enable_pir_api"] = "1" + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + data_file = [str(Path(__file__).parent / 
"water/data/single")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["model"]["type_map"] = ["O", "H", "Au"] + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + run_dp("dp --pd freeze") + + # @unittest.skip( + # "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " + # "will be supported in the future." + # ) + def test_checkpoint(self): + INPUT = "model.pd" + ATTRIBUTES = "type-map descriptor fitting-net" + with redirect_stderr(io.StringIO()) as f: + run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}") + results = f.getvalue().split("\n")[:-1] + assert "This is a singletask model" in results[-4] + assert "The type_map is ['O', 'H', 'Au']" in results[-3] + assert ( + "{'type': 'se_e2_a'" and "'sel': [46, 92, 4]" and "'rcut': 4.0" + ) in results[-2] + assert ( + "The fitting_net parameter is {'neuron': [24, 24, 24], 'resnet_dt': True, 'seed': 1}" + in results[-1] + ) + + @unittest.skip( + "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " + "will be supported in the future." + ) + def test_frozen_model(self): + INPUT = "frozen_model.json" + ATTRIBUTES = "type-map descriptor fitting-net" + with redirect_stderr(io.StringIO()) as f: + run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}") + results = f.getvalue().split("\n")[:-1] + assert "This is a singletask model" in results[-4] + assert "The type_map is ['O', 'H', 'Au']" in results[-3] + assert ( + "{'type': 'se_e2_a'" and "'sel': [46, 92, 4]" and "'rcut': 4.0" + ) in results[-2] + assert ( + "The fitting_net parameter is {'neuron': [24, 24, 24], 'resnet_dt': True, 'seed': 1}" + in results[-1] + ) + + # @unittest.skip( + # "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " + # "will be supported in the future." 
+ # ) + def test_checkpoint_error(self): + INPUT = "model.pd" + ATTRIBUTES = "model-branch type-map descriptor fitting-net" + with self.assertRaisesRegex( + RuntimeError, "The 'model-branch' option requires a multitask model" + ): + run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}") + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith("pd"): + os.remove(f) + if f in ["lcurve.out", "frozen_model.pd", "output.txt", "checkpoint"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +class TestMultiTaskModel(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/multitask.json") + with open(input_json) as f: + self.config = json.load(f) + self.config["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "se_e2_a" + os.makedirs(self.stat_files, exist_ok=True) + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["model"]["model_dict"]["model_1"]["fitting_net"] = { + "neuron": [1, 2, 3], + "seed": 678, + } + self.config["model"]["model_dict"]["model_2"]["fitting_net"] = { + "neuron": [9, 8, 7], + "seed": 1111, + } + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + 
trainer = get_trainer(deepcopy(self.config), shared_links=self.shared_links) + trainer.run() + run_dp("dp --pd freeze --head model_1") + + # @unittest.skip( + # "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " + # "will be supported in the future." + # ) + def test_checkpoint(self): + INPUT = "model.ckpt.pd" + ATTRIBUTES = "model-branch type-map descriptor fitting-net" + with redirect_stderr(io.StringIO()) as f: + run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}") + results = f.getvalue().split("\n")[:-1] + assert "This is a multitask model" in results[-8] + assert ( + "Available model branches are ['model_1', 'model_2', 'RANDOM'], " + "where 'RANDOM' means using a randomly initialized fitting net." + in results[-7] + ) + assert "The type_map of branch model_1 is ['O', 'H', 'B']" in results[-6] + assert "The type_map of branch model_2 is ['O', 'H', 'B']" in results[-5] + assert ( + "model_1" + and "'type': 'se_e2_a'" + and "'sel': [46, 92, 4]" + and "'rcut_smth': 0.5" + ) in results[-4] + assert ( + "model_2" + and "'type': 'se_e2_a'" + and "'sel': [46, 92, 4]" + and "'rcut_smth': 0.5" + ) in results[-3] + assert ( + "The fitting_net parameter of branch model_1 is {'neuron': [1, 2, 3], 'seed': 678}" + in results[-2] + ) + assert ( + "The fitting_net parameter of branch model_2 is {'neuron': [9, 8, 7], 'seed': 1111}" + in results[-1] + ) + + @unittest.skip( + "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " + "will be supported in the future." 
+ ) + def test_frozen_model(self): + INPUT = "frozen_model.json" + ATTRIBUTES = "type-map descriptor fitting-net" + with redirect_stderr(io.StringIO()) as f: + run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}") + results = f.getvalue().split("\n")[:-1] + assert "This is a singletask model" in results[-4] + assert "The type_map is ['O', 'H', 'B']" in results[-3] + assert ( + "'type': 'se_e2_a'" and "'sel': [46, 92, 4]" and "'rcut_smth': 0.5" + ) in results[-2] + assert ( + "The fitting_net parameter is {'neuron': [1, 2, 3], 'seed': 678}" + in results[-1] + ) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith("pd"): + os.remove(f) + if f in [ + "lcurve.out", + "frozen_model.json", + "frozen_model.pdiparams", + "checkpoint", + "output.txt", + ]: + os.remove(f) + if f in ["stat_files", self.stat_files]: + shutil.rmtree(f) diff --git a/source/tests/pd/test_init_model.py b/source/tests/pd/test_init_model.py new file mode 100644 index 0000000000..50c1e82ad6 --- /dev/null +++ b/source/tests/pd/test_init_model.py @@ -0,0 +1,136 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import tempfile +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.infer.deep_eval import ( + DeepPot, +) + +from .common import ( + run_dp, +) + + +class TestInitModel(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + config = json.load(f) + config["model"]["descriptor"]["smooth_type_embedding"] = True + config["training"]["numb_steps"] = 1 + config["training"]["save_freq"] = 1 + config["learning_rate"]["start_lr"] = 1.0 + config["training"]["training_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/single") + ] + config["training"]["validation_data"]["systems"] = [ + str(Path(__file__).parent 
/ "water/data/single") + ] + + self.models = [] + for imodel in range(3): + ckpt_model = f"model{imodel}.ckpt" + if imodel == 0: + temp_config = deepcopy(config) + temp_config["training"]["save_ckpt"] = ckpt_model + trainer = get_trainer(temp_config) + elif imodel == 1: + temp_config = deepcopy(config) + temp_config["training"]["numb_steps"] = 0 + temp_config["training"]["save_ckpt"] = ckpt_model + trainer = get_trainer(temp_config, init_model=self.models[-1]) + else: + empty_config = deepcopy(config) + empty_config["model"]["descriptor"] = {} + empty_config["model"]["fitting_net"] = {} + empty_config["training"]["numb_steps"] = 0 + empty_config["training"]["save_ckpt"] = ckpt_model + tmp_input = tempfile.NamedTemporaryFile(delete=False, suffix=".json") + with open(tmp_input.name, "w") as f: + json.dump(empty_config, f, indent=4) + run_dp( + f"dp --pd train {tmp_input.name} --init-model {self.models[-1]} --use-pretrain-script --skip-neighbor-stat" + ) + trainer = None + + if imodel in [0, 1]: + trainer.run() + self.models.append(ckpt_model + ".pd") + + def test_dp_test(self): + dp1 = DeepPot(str(self.models[0])) + dp2 = DeepPot(str(self.models[1])) + dp3 = DeepPot(str(self.models[2])) + cell = np.array( + [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + ).reshape(1, 3, 3) + coord = np.array( + [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + ).reshape(1, -1, 3) + atype = np.array([0, 0, 0, 1, 
1]).reshape(1, -1) + + ret1 = dp1.eval(coord, cell, atype, atomic=True) + e1, f1, v1, ae1, av1 = ret1[0], ret1[1], ret1[2], ret1[3], ret1[4] + ret2 = dp2.eval(coord, cell, atype, atomic=True) + e2, f2, v2, ae2, av2 = ret2[0], ret2[1], ret2[2], ret2[3], ret2[4] + ret3 = dp3.eval(coord, cell, atype, atomic=True) + e3, f3, v3, ae3, av3 = ret3[0], ret3[1], ret3[2], ret3[3], ret3[4] + np.testing.assert_allclose(e1, e2, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(e1, e3, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(f1, f2, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(f1, f3, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(v1, v2, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(v1, v3, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(ae1, ae2, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(ae1, ae3, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(av1, av2, rtol=1e-10, atol=1e-10) + np.testing.assert_allclose(av1, av3, rtol=1e-10, atol=1e-10) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) From 3fd979d153a8777c12aaa0453037c7808582469d Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 7 Nov 2024 22:28:31 +0800 Subject: [PATCH 23/58] remove redundant file and fix typo --- deepmd/pd/model/descriptor/se_a.py | 4 +- deepmd/pd/utils/decomp.py | 2 +- source/tests/pd/test_init_model.py | 136 ----------------------------- 3 files changed, 3 insertions(+), 139 deletions(-) delete mode 100644 source/tests/pd/test_init_model.py diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 76ea32797f..124fa3a88f 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -479,11 +479,11 @@ def get_dim_in(self) -> int: return self.dim_in def mixed_types(self) -> bool: - """If true, the discriptor + 
"""If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 25eac1b6d5..86abe93b43 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py @@ -68,7 +68,7 @@ def norm_decomp( if p == 2 or p == 2.0: # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward return (x * x).sum(axis=axis, keepdim=keepdim).clip(1e-12) ** 0.5 - return (x**p).sum(axis=axis, keepdim=keepdim) ** (1 / p) + return (x.abs()**p).sum(axis=axis, keepdim=keepdim) ** (1 / p) def take_along_axis_decomp( diff --git a/source/tests/pd/test_init_model.py b/source/tests/pd/test_init_model.py deleted file mode 100644 index 50c1e82ad6..0000000000 --- a/source/tests/pd/test_init_model.py +++ /dev/null @@ -1,136 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import json -import os -import shutil -import tempfile -import unittest -from copy import ( - deepcopy, -) -from pathlib import ( - Path, -) - -import numpy as np - -from deepmd.pd.entrypoints.main import ( - get_trainer, -) -from deepmd.pd.infer.deep_eval import ( - DeepPot, -) - -from .common import ( - run_dp, -) - - -class TestInitModel(unittest.TestCase): - def setUp(self): - input_json = str(Path(__file__).parent / "water/se_atten.json") - with open(input_json) as f: - config = json.load(f) - config["model"]["descriptor"]["smooth_type_embedding"] = True - config["training"]["numb_steps"] = 1 - config["training"]["save_freq"] = 1 - config["learning_rate"]["start_lr"] = 1.0 - config["training"]["training_data"]["systems"] = [ - str(Path(__file__).parent / "water/data/single") - ] - config["training"]["validation_data"]["systems"] = [ - 
str(Path(__file__).parent / "water/data/single") - ] - - self.models = [] - for imodel in range(3): - ckpt_model = f"model{imodel}.ckpt" - if imodel == 0: - temp_config = deepcopy(config) - temp_config["training"]["save_ckpt"] = ckpt_model - trainer = get_trainer(temp_config) - elif imodel == 1: - temp_config = deepcopy(config) - temp_config["training"]["numb_steps"] = 0 - temp_config["training"]["save_ckpt"] = ckpt_model - trainer = get_trainer(temp_config, init_model=self.models[-1]) - else: - empty_config = deepcopy(config) - empty_config["model"]["descriptor"] = {} - empty_config["model"]["fitting_net"] = {} - empty_config["training"]["numb_steps"] = 0 - empty_config["training"]["save_ckpt"] = ckpt_model - tmp_input = tempfile.NamedTemporaryFile(delete=False, suffix=".json") - with open(tmp_input.name, "w") as f: - json.dump(empty_config, f, indent=4) - run_dp( - f"dp --pd train {tmp_input.name} --init-model {self.models[-1]} --use-pretrain-script --skip-neighbor-stat" - ) - trainer = None - - if imodel in [0, 1]: - trainer.run() - self.models.append(ckpt_model + ".pd") - - def test_dp_test(self): - dp1 = DeepPot(str(self.models[0])) - dp2 = DeepPot(str(self.models[1])) - dp3 = DeepPot(str(self.models[2])) - cell = np.array( - [ - 5.122106549439247480e00, - 4.016537340154059388e-01, - 6.951654033828678081e-01, - 4.016537340154059388e-01, - 6.112136112297989143e00, - 8.178091365465004481e-01, - 6.951654033828678081e-01, - 8.178091365465004481e-01, - 6.159552512682983760e00, - ] - ).reshape(1, 3, 3) - coord = np.array( - [ - 2.978060152121375648e00, - 3.588469695887098077e00, - 2.792459820604495491e00, - 3.895592322591093115e00, - 2.712091020667753760e00, - 1.366836847133650501e00, - 9.955616170888935690e-01, - 4.121324820711413039e00, - 1.817239061889086571e00, - 3.553661462345699906e00, - 5.313046969500791583e00, - 6.635182659098815883e00, - 6.088601018589653080e00, - 6.575011420004332585e00, - 6.825240650611076099e00, - ] - ).reshape(1, -1, 3) - atype = 
np.array([0, 0, 0, 1, 1]).reshape(1, -1) - - ret1 = dp1.eval(coord, cell, atype, atomic=True) - e1, f1, v1, ae1, av1 = ret1[0], ret1[1], ret1[2], ret1[3], ret1[4] - ret2 = dp2.eval(coord, cell, atype, atomic=True) - e2, f2, v2, ae2, av2 = ret2[0], ret2[1], ret2[2], ret2[3], ret2[4] - ret3 = dp3.eval(coord, cell, atype, atomic=True) - e3, f3, v3, ae3, av3 = ret3[0], ret3[1], ret3[2], ret3[3], ret3[4] - np.testing.assert_allclose(e1, e2, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(e1, e3, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(f1, f2, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(f1, f3, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(v1, v2, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(v1, v3, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(ae1, ae2, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(ae1, ae3, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(av1, av2, rtol=1e-10, atol=1e-10) - np.testing.assert_allclose(av1, av3, rtol=1e-10, atol=1e-10) - - def tearDown(self): - for f in os.listdir("."): - if f.startswith("model") and f.endswith(".pd"): - os.remove(f) - if f in ["lcurve.out"]: - os.remove(f) - if f in ["stat_files"]: - shutil.rmtree(f) From 5922e84bab383cdabd02aba55f35a56e01e30bd0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:29:55 +0000 Subject: [PATCH 24/58] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pd/utils/decomp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 86abe93b43..0395d25e66 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py @@ -68,7 +68,7 @@ def norm_decomp( if p == 2 or p == 2.0: # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward return (x * x).sum(axis=axis, keepdim=keepdim).clip(1e-12) ** 0.5 - return 
(x.abs()**p).sum(axis=axis, keepdim=keepdim) ** (1 / p) + return (x.abs() ** p).sum(axis=axis, keepdim=keepdim) ** (1 / p) def take_along_axis_decomp( From f5a17a96fee4a225efa47a0bb45d817a266f9ffc Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 8 Nov 2024 12:08:23 +0800 Subject: [PATCH 25/58] update unitest --- deepmd/pd/utils/decomp.py | 24 ++--- source/tests/pd/test_change_bias.py | 150 ++++++++++++++++++++++++++++ source/tests/pd/test_decomp.py | 141 ++++++++++++++++++++++++++ source/tests/pd/test_dp_show.py | 12 --- 4 files changed, 303 insertions(+), 24 deletions(-) create mode 100644 source/tests/pd/test_change_bias.py create mode 100644 source/tests/pd/test_decomp.py diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 0395d25e66..434301441a 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py @@ -67,7 +67,7 @@ def norm_decomp( """ if p == 2 or p == 2.0: # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward - return (x * x).sum(axis=axis, keepdim=keepdim).clip(1e-12) ** 0.5 + return (x * x).sum(axis=axis, keepdim=keepdim) ** 0.5 return (x.abs() ** p).sum(axis=axis, keepdim=keepdim) ** (1 / p) @@ -134,21 +134,20 @@ def scatter_reduce_decomp( """ # reduce: "sum", "prod", "mean", "amax", "amin" if reduce == "sum": - input.put_along_axis_(indices=index, values=src, axis=axis, reduce="add") + output = input.put_along_axis( + indices=index, values=src, axis=axis, reduce="add" + ) elif reduce == "mean": - input.put_along_axis_(indices=index, values=src, axis=axis, reduce="add") - dst_div = paddle.ones_like(input).put_along_axis( - indices=index, - values=paddle.to_tensor(1.0, dtype=input.dtype), - axis=axis, - reduce="add", + output = input.put_along_axis( + indices=index, values=src, axis=axis, reduce="mean" ) - input = input / dst_div elif reduce == "prod": - input = input.put_along_axis(indices=index, values=src, axis=axis, reduce="mul") + output = input.put_along_axis( + 
indices=index, values=src, axis=axis, reduce="mul" + ) else: raise NotImplementedError("only support mode in ['sum', 'prod', 'mean']!") - return input + return output def sec(length: int, size: int) -> list[int]: @@ -235,7 +234,8 @@ def normalize_decomp( paddle.Tensor Computed output. """ - return x / (norm(x, p=p, axis=axis, keepdim=True).clip(min=epsilon)) + return paddle.nn.functional.normalize(x, p, axis, epsilon) + # return x / norm(x, p=p, axis=axis, keepdim=True) # alias for decomposed functions for convinience diff --git a/source/tests/pd/test_change_bias.py b/source/tests/pd/test_change_bias.py new file mode 100644 index 0000000000..2d87b739ff --- /dev/null +++ b/source/tests/pd/test_change_bias.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import tempfile +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.train.training import ( + get_model_for_wrapper, + model_change_out_bias, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_paddle_tensor, +) + +from .common import ( + run_dp, +) +from .model.test_permutation import ( + model_se_e2_a, +) +from .test_finetune import ( + energy_data_requirement, +) + +current_path = os.getcwd() + + +class TestChangeBias(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + model_name = "change-bias-model.ckpt" + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + 
self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.config["training"]["save_ckpt"] = model_name + self.trainer = get_trainer(deepcopy(self.config)) + self.trainer.run() + self.state_dict_trained = self.trainer.wrapper.model.state_dict() + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + self.sampled = make_stat_input( + data.systems, + data.dataloaders, + nbatches=1, + ) + self.model_path = Path(current_path) / (model_name + ".pd") + self.model_path_data_bias = Path(current_path) / ( + model_name + "data_bias" + ".pd" + ) + self.model_path_data_file_bias = Path(current_path) / ( + model_name + "data_file_bias" + ".pd" + ) + self.model_path_user_bias = Path(current_path) / ( + model_name + "user_bias" + ".pd" + ) + + def test_change_bias_with_data(self): + run_dp( + f"dp --pd change-bias {self.model_path!s} -s {self.data_file[0]} -o {self.model_path_data_bias!s}" + ) + state_dict = paddle.load(str(self.model_path_data_bias)) + model_params = state_dict["model"]["_extra_state"]["model_params"] + model_for_wrapper = get_model_for_wrapper(model_params) + wrapper = ModelWrapper(model_for_wrapper) + wrapper.set_state_dict(state_dict["model"]) + updated_bias = wrapper.model["Default"].get_out_bias() + expected_model = model_change_out_bias( + self.trainer.wrapper.model["Default"], + self.sampled, + _bias_adjust_mode="change-by-statistic", + ) + expected_bias = expected_model.get_out_bias() + np.testing.assert_allclose(updated_bias.numpy(), expected_bias.numpy()) + + def test_change_bias_with_data_sys_file(self): + tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt") + with open(tmp_file.name, "w") as f: + f.writelines([sys + "\n" for sys in self.data_file]) + run_dp( + f"dp --pd change-bias {self.model_path!s} -f {tmp_file.name} -o 
{self.model_path_data_file_bias!s}" + ) + state_dict = paddle.load(str(self.model_path_data_file_bias)) + model_params = state_dict["model"]["_extra_state"]["model_params"] + model_for_wrapper = get_model_for_wrapper(model_params) + wrapper = ModelWrapper(model_for_wrapper) + wrapper.set_state_dict(state_dict["model"]) + updated_bias = wrapper.model["Default"].get_out_bias() + expected_model = model_change_out_bias( + self.trainer.wrapper.model["Default"], + self.sampled, + _bias_adjust_mode="change-by-statistic", + ) + expected_bias = expected_model.get_out_bias() + np.testing.assert_allclose(updated_bias.numpy(), expected_bias.numpy()) + + def test_change_bias_with_user_defined(self): + user_bias = [0.1, 3.2, -0.5] + run_dp( + f"dp --pd change-bias {self.model_path!s} -b {' '.join([str(_) for _ in user_bias])} -o {self.model_path_user_bias!s}" + ) + state_dict = paddle.load(str(self.model_path_user_bias)) + model_params = state_dict["model"]["_extra_state"]["model_params"] + model_for_wrapper = get_model_for_wrapper(model_params) + wrapper = ModelWrapper(model_for_wrapper) + wrapper.set_state_dict(state_dict["model"]) + updated_bias = wrapper.model["Default"].get_out_bias() + expected_bias = to_paddle_tensor(np.array(user_bias)).reshape( + updated_bias.shape + ) + np.testing.assert_allclose(updated_bias.numpy(), expected_bias.numpy()) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("change-bias-model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) diff --git a/source/tests/pd/test_decomp.py b/source/tests/pd/test_decomp.py new file mode 100644 index 0000000000..1716fbacc7 --- /dev/null +++ b/source/tests/pd/test_decomp.py @@ -0,0 +1,141 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + decomp, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +class TestDecomp(unittest.TestCase): 
+ def setUp(self): + paddle.seed(GLOBAL_SEED) + + def test_softmax_decomp(self): + raw_api = paddle.nn.functional.softmax + decomp_api = decomp.softmax + + raw_input = paddle.randn([100, 100], "float32") + raw_output = raw_api(raw_input) + decomp_output = decomp_api(raw_input) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), + 1e-6, + 1e-8, + ) + + def test_norm_decomp(self): + raw_api = paddle.linalg.norm + decomp_api = decomp.norm + + raw_input = paddle.randn([100, 100], "float32") + raw_output = raw_api(raw_input, p=2, axis=-1) + decomp_output = decomp_api(raw_input, p=2, axis=-1) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), + 1e-5, + 1e-8, + ) + + def test_take_along_axis_decomp(self): + raw_api = paddle.take_along_axis + decomp_api = decomp.take_along_axis + + raw_input = paddle.randn([100, 100], "float32") + raw_indices = paddle.randint(0, 100, [100, 2]) + raw_output = raw_api(raw_input, raw_indices, axis=-1) + decomp_output = decomp_api(raw_input, raw_indices, axis=-1) + + np.testing.assert_equal( + raw_output.numpy(), + decomp_output.numpy(), + ) + + def test_scatter_reduce_decomp(self): + raw_api = paddle.put_along_axis + decomp_api = decomp.scatter_reduce + raw_input = paddle.randn([100, 100], "float32") + axis = 0 + raw_index = paddle.randint(0, 100, [100, 100], "int64") + raw_values = paddle.randn([100, 100], "float32") + raw_output = raw_api(raw_input, raw_index, raw_values, axis=axis, reduce="add") + decomp_output = decomp_api( + raw_input, axis, raw_index, src=raw_values, reduce="sum" + ) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), + 2e-5, + 1e-7, + ) + + # raw_output = raw_api(raw_input, raw_index, raw_values, axis=axis, reduce="mean") + # decomp_output = decomp_api(raw_input, axis, raw_index, src=raw_values, reduce="mean") + + # np.testing.assert_allclose( + # raw_output.numpy(), + # decomp_output.numpy(), + # 1e-5, + # 1e-8, + # ) + + def 
test_sec(self): + shape = [10, 3] + length = shape[0] + size = 3 + + split_sections = decomp.sec(length, size) + assert split_sections == [3, 3, 3, 1] + + def test_masked_add_(self): + decomp_api = decomp.masked_add_ + + raw_input = paddle.randn([10, 10], "float32") + raw_mask = paddle.randint(0, 2, [10, 10]).astype("bool") + add_values = paddle.randn([10, 10], "float32") + raw_output = raw_input.clone() + + for i in range(raw_input.shape[0]): + for j in range(raw_input.shape[1]): + if raw_mask[i][j]: + raw_output[i][j] += add_values[i][j] + + decomp_output = decomp_api(raw_input, raw_mask, add_values[raw_mask]) + + np.testing.assert_equal( + raw_output.numpy(), + decomp_output.numpy(), # inplace + ) + + np.testing.assert_equal( + raw_output.numpy(), + raw_input.numpy(), # inplace + ) + + def test_normalize_decomp(self): + raw_api = paddle.nn.functional.normalize + decomp_api = decomp.normalize_decomp + + raw_input = paddle.randn([100, 100], "float32") + axis = -1 + + raw_output = raw_api(raw_input, p=2, axis=axis) + decomp_output = decomp_api(raw_input, p=2, axis=axis) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), # inplace + 1e-5, + 1e-8, + ) diff --git a/source/tests/pd/test_dp_show.py b/source/tests/pd/test_dp_show.py index 351a7f971f..c1c20ff3a1 100644 --- a/source/tests/pd/test_dp_show.py +++ b/source/tests/pd/test_dp_show.py @@ -47,10 +47,6 @@ def setUp(self): trainer.run() run_dp("dp --pd freeze") - # @unittest.skip( - # "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " - # "will be supported in the future." - # ) def test_checkpoint(self): INPUT = "model.pd" ATTRIBUTES = "type-map descriptor fitting-net" @@ -87,10 +83,6 @@ def test_frozen_model(self): in results[-1] ) - # @unittest.skip( - # "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " - # "will be supported in the future." 
- # ) def test_checkpoint_error(self): INPUT = "model.pd" ATTRIBUTES = "model-branch type-map descriptor fitting-net" @@ -156,10 +148,6 @@ def setUp(self): trainer.run() run_dp("dp --pd freeze --head model_1") - # @unittest.skip( - # "Paddle do not support dp --pd show frozen models(.json and .pdiparams file), " - # "will be supported in the future." - # ) def test_checkpoint(self): INPUT = "model.ckpt.pd" ATTRIBUTES = "model-branch type-map descriptor fitting-net" From 8bea1bf932b26f9013bedd42c9884015d4e318f8 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 8 Nov 2024 12:10:31 +0800 Subject: [PATCH 26/58] delete record --- deepmd/pd/entrypoints/main.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index e8b6a0d0c7..1d27bbe877 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -80,11 +80,6 @@ ) from deepmd.utils.summary import SummaryPrinter as BaseSummaryPrinter -# from paddle.distributed.elastic.multiprocessing.errors import ( -# record, -# ) - - log = logging.getLogger(__name__) @@ -548,7 +543,6 @@ def change_bias(FLAGS): log.info(f"Saved model to {output_path}") -# @record def main(args: Optional[Union[list[str], argparse.Namespace]] = None): if not isinstance(args, argparse.Namespace): FLAGS = parse_args(args=args) From 8a7875f1ad2022fcbd6673a0e953e974abb6d027 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 8 Nov 2024 13:02:08 +0800 Subject: [PATCH 27/58] remove more unused code and files --- deepmd/pd/infer/deep_eval.py | 88 +-------- deepmd/pd/model/descriptor/__init__.py | 2 - deepmd/pd/model/descriptor/descriptor.py | 11 -- deepmd/pd/model/network/network.py | 240 +---------------------- deepmd/pd/model/task/__init__.py | 6 - deepmd/pd/model/task/ener.py | 171 ---------------- deepmd/pd/model/task/type_predict.py | 47 ----- deepmd/pt/model/network/network.py | 24 +-- 
source/tests/pd/model/test_null_input.py | 94 +++++++++ source/tests/pd/model/test_smooth.py | 172 ++++++++++++++++ 10 files changed, 281 insertions(+), 574 deletions(-) delete mode 100644 deepmd/pd/model/task/type_predict.py create mode 100644 source/tests/pd/model/test_null_input.py create mode 100644 source/tests/pd/model/test_smooth.py diff --git a/deepmd/pd/infer/deep_eval.py b/deepmd/pd/infer/deep_eval.py index 54018b6728..a2f8510f28 100644 --- a/deepmd/pd/infer/deep_eval.py +++ b/deepmd/pd/infer/deep_eval.py @@ -26,9 +26,6 @@ from deepmd.pd.model.model import ( get_model, ) -from deepmd.pd.model.network.network import ( - TypeEmbedNetConsistent, -) from deepmd.pd.train.wrapper import ( ModelWrapper, ) @@ -434,81 +431,7 @@ def _eval_model_spin( aparam: Optional[np.ndarray], request_defs: list[OutputVariableDef], ): - model = self.dp.to(DEVICE) - - nframes = coords.shape[0] - if len(atom_types.shape) == 1: - natoms = len(atom_types) - atom_types = np.tile(atom_types, nframes).reshape([nframes, -1]) - else: - natoms = len(atom_types[0]) - - coord_input = paddle.to_tensor( - coords.reshape([nframes, natoms, 3]), - dtype=GLOBAL_PD_FLOAT_PRECISION, - place=DEVICE, - ) - type_input = paddle.to_tensor(atom_types, dtype=paddle.int64, place=DEVICE) - spin_input = paddle.to_tensor( - spins.reshape([nframes, natoms, 3]), - dtype=GLOBAL_PD_FLOAT_PRECISION, - place=DEVICE, - ) - if cells is not None: - box_input = paddle.to_tensor( - cells.reshape([nframes, 3, 3]), - dtype=GLOBAL_PD_FLOAT_PRECISION, - place=DEVICE, - ) - else: - box_input = None - if fparam is not None: - fparam_input = to_paddle_tensor( - fparam.reshape([nframes, self.get_dim_fparam()]) - ) - else: - fparam_input = None - if aparam is not None: - aparam_input = to_paddle_tensor( - aparam.reshape([nframes, natoms, self.get_dim_aparam()]) - ) - else: - aparam_input = None - - do_atomic_virial = any( - x.category == OutputVariableCategory.DERV_C_REDU for x in request_defs - ) - batch_output = model( - 
coord_input, - type_input, - spin=spin_input, - box=box_input, - do_atomic_virial=do_atomic_virial, - fparam=fparam_input, - aparam=aparam_input, - ) - if isinstance(batch_output, tuple): - batch_output = batch_output[0] - - results = [] - for odef in request_defs: - pd_name = self._OUTDEF_DP2BACKEND[odef.name] - if pd_name in batch_output: - shape = self._get_output_shape(odef, nframes, natoms) - out = batch_output[pd_name].reshape(shape).numpy() - results.append(out) - else: - shape = self._get_output_shape(odef, nframes, natoms) - results.append( - np.full( - np.abs(shape), - np.nan, - dtype=NP_PRECISION_DICT[ - RESERVED_PRECISON_DICT[GLOBAL_PD_FLOAT_PRECISION] - ], - ) - ) # this is kinda hacky - return tuple(results) + raise NotImplementedError("_eval_model_spin is not supported yet.") def _get_output_shape(self, odef, nframes, natoms): if odef.category == OutputVariableCategory.DERV_C_REDU: @@ -552,14 +475,7 @@ def eval_typeebd(self) -> np.ndarray: deepmd.pd.model.network.network.TypeEmbedNetConsistent : The type embedding network. 
""" - out = [] - for mm in self.dp.model["Default"].modules(): - if mm.original_name == TypeEmbedNetConsistent.__name__: - out.append(mm(DEVICE)) - if not out: - raise KeyError("The model has no type embedding networks.") - typeebd = paddle.concat(out, axis=1) - return to_numpy_array(typeebd) + raise NotImplementedError("eval_typeebd is not supported yet.") def get_model_def_script(self) -> str: """Get model definition script.""" diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 0141e4cd03..654643959b 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -4,7 +4,6 @@ ) from .descriptor import ( DescriptorBlock, - make_default_type_embedding, ) from .env_mat import ( prod_env_mat, @@ -17,7 +16,6 @@ __all__ = [ "BaseDescriptor", "DescriptorBlock", - "make_default_type_embedding", "DescrptBlockSeA", "DescrptSeA", "prod_env_mat", diff --git a/deepmd/pd/model/descriptor/descriptor.py b/deepmd/pd/model/descriptor/descriptor.py index b27facd0ae..3d24d2f967 100644 --- a/deepmd/pd/model/descriptor/descriptor.py +++ b/deepmd/pd/model/descriptor/descriptor.py @@ -12,9 +12,6 @@ import paddle -from deepmd.pd.model.network.network import ( - TypeEmbedNet, -) from deepmd.pd.utils import ( env, ) @@ -184,14 +181,6 @@ def need_sorted_nlist_for_lower(self) -> bool: """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" -def make_default_type_embedding( - ntypes, -): - decomp = {} - decomp["tebd_dim"] = 8 - return TypeEmbedNet(ntypes, decomp["tebd_dim"]), decomp - - def extend_descrpt_stat(des, type_map, des_with_stat=None): r""" Extend the statistics of a descriptor block with types from newly provided `type_map`. 
diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index 21d6586476..f118c234ab 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -4,13 +4,11 @@ Union, ) -import numpy as np import paddle import paddle.nn as nn -import paddle.nn.functional as F -from deepmd.pd.model.network import ( - init, +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, ) from deepmd.pd.model.network.mlp import ( EmbeddingNet, @@ -18,27 +16,15 @@ from deepmd.pd.utils import ( env, ) -from deepmd.utils.version import ( - check_version_compatibility, -) - -try: - from typing import ( - Final, - ) -except ImportError: - from paddle.jit import Final - -from deepmd.dpmodel.utils.type_embed import ( - get_econf_tebd, -) from deepmd.pd.utils.utils import ( - ActivationFn, to_paddle_tensor, ) from deepmd.utils.finetune import ( get_index_between_two_maps, ) +from deepmd.utils.version import ( + check_version_compatibility, +) def Tensor(*shape): @@ -47,222 +33,6 @@ def Tensor(*shape): ) -class SimpleLinear(nn.Layer): - use_timestep: Final[bool] - - def __init__( - self, - num_in, - num_out, - bavg=0.0, - stddev=1.0, - use_timestep=False, - activate=None, - bias: bool = True, - ): - """Construct a linear layer. - - Args: - - num_in: Width of input tensor. - - num_out: Width of output tensor. - - use_timestep: Apply time-step to weight. - - activate: type of activate func. 
- """ - super().__init__() - self.num_in = num_in - self.num_out = num_out - self.use_timestep = use_timestep - self.activate = ActivationFn(activate) - - self.matrix = self.create_parameter( - [num_in, num_out], - dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ) - init.normal_(self.matrix, std=stddev / np.sqrt(num_out + num_in)) - if bias: - self.bias = self.create_parameter( - (1, num_out), - dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ) - init.normal_(self.bias, mean=bavg, std=stddev) - else: - self.bias = None - if self.use_timestep: - self.idt = self.create_parameter( - (1, num_out), - dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ) - init.normal_(self.idt, mean=0.1, std=0.001) - - def forward(self, inputs): - """Return X*W+b.""" - xw = paddle.matmul(inputs, self.matrix) - hidden = xw + self.bias if self.bias is not None else xw - hidden = self.activate(hidden) - if self.use_timestep: - hidden = hidden * self.idt - return hidden - - -class Linear(nn.Linear): - def __init__( - self, - d_in: int, - d_out: int, - bias: bool = True, - init: str = "default", - ): - super().__init__( - d_in, - d_out, - bias=bias, - dtype=env.GLOBAL_PD_FLOAT_PRECISION, - device=env.DEVICE, - ) - - self.use_bias = bias - - if self.use_bias: - with paddle.no_grad(): - self.bias.fill_(0) - - if init == "default": - self._trunc_normal_init(1.0) - elif init == "relu": - self._trunc_normal_init(2.0) - elif init == "glorot": - self._glorot_uniform_init() - elif init == "gating": - self._zero_init(self.use_bias) - elif init == "normal": - self._normal_init() - elif init == "final": - self._zero_init(False) - else: - raise ValueError("Invalid init method.") - - def _trunc_normal_init(self, scale=1.0): - # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) 
- TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978 - _, fan_in = self.weight.shape - scale = scale / max(1, fan_in) - std = (scale**0.5) / TRUNCATED_NORMAL_STDDEV_FACTOR - init.trunc_normal_(self.weight, mean=0.0, std=std) - - def _glorot_uniform_init(self): - init.xavier_uniform_(self.weight, gain=1) - - def _zero_init(self, use_bias=True): - with paddle.no_grad(): - self.weight.fill_(0.0) - if use_bias: - with paddle.no_grad(): - self.bias.fill_(1.0) - - def _normal_init(self): - init.kaiming_normal_(self.weight, nonlinearity="linear") - - -class NonLinearHead(nn.Layer): - def __init__(self, input_dim, out_dim, activation_fn, hidden=None): - super().__init__() - hidden = input_dim if not hidden else hidden - self.linear1 = SimpleLinear(input_dim, hidden, activate=activation_fn) - self.linear2 = SimpleLinear(hidden, out_dim) - - def forward(self, x): - x = self.linear1(x) - x = self.linear2(x) - return x - - -class MaskLMHead(nn.Layer): - """Head for masked language modeling.""" - - def __init__(self, embed_dim, output_dim, activation_fn, weight=None): - super().__init__() - self.dense = SimpleLinear(embed_dim, embed_dim) - self.activation_fn = ActivationFn(activation_fn) - self.layer_norm = nn.LayerNorm(embed_dim) - - if weight is None: - weight = nn.Linear(embed_dim, output_dim, bias_attr=False).weight - self.weight = weight.T - self.bias = self.create_parameter( - [output_dim], - dtype=env.GLOBAL_PD_FLOAT_PRECISION, - default_initializer=nn.initializer.Constant(0), # pylint: disable=no-explicit-dtype,no-explicit-device - ) - - def forward( - self, features, masked_tokens: Optional[paddle.Tensor] = None, **kwargs - ): - # Only project the masked tokens while training, - # saves both memory and computation - if masked_tokens is not None: - features = features[masked_tokens, :] - - x = self.dense(features) - x = self.activation_fn(x) - x = self.layer_norm(x) - # project back to size of vocabulary with bias - x = F.linear(x, self.weight) + self.bias - return x 
- - -class ResidualDeep(nn.Layer): - def __init__( - self, type_id, embedding_width, neuron, bias_atom_e, out_dim=1, resnet_dt=False - ): - """Construct a filter on the given element as neighbor. - - Args: - - typei: Element ID. - - embedding_width: Embedding width per atom. - - neuron: Number of neurons in each hidden layers of the embedding net. - - resnet_dt: Using time-step in the ResNet construction. - """ - super().__init__() - self.type_id = type_id - self.neuron = [embedding_width, *neuron] - self.out_dim = out_dim - - deep_layers = [] - for ii in range(1, len(self.neuron)): - one = SimpleLinear( - num_in=self.neuron[ii - 1], - num_out=self.neuron[ii], - use_timestep=( - resnet_dt and ii > 1 and self.neuron[ii - 1] == self.neuron[ii] - ), - activate="tanh", - ) - deep_layers.append(one) - self.deep_layers = nn.LayerList(deep_layers) - if not env.ENERGY_BIAS_TRAINABLE: - bias_atom_e = 0 - self.final_layer = SimpleLinear(self.neuron[-1], self.out_dim, bias_atom_e) - - def forward(self, inputs): - """Calculate decoded embedding for each atom. - - Args: - - inputs: Embedding net output per atom. Its shape is [nframes*nloc, self.embedding_width]. - - Returns - ------- - - `paddle.Tensor`: Output layer with shape [nframes*nloc, self.neuron[-1]]. 
- """ - outputs = inputs - for idx, linear in enumerate(self.deep_layers): - if idx > 0 and linear.num_in == linear.num_out: - outputs = outputs + linear(outputs) - else: - outputs = linear(outputs) - outputs = self.final_layer(outputs) - return outputs - - class TypeEmbedNet(nn.Layer): def __init__( self, diff --git a/deepmd/pd/model/task/__init__.py b/deepmd/pd/model/task/__init__.py index 1a36bff30c..ad616156c7 100644 --- a/deepmd/pd/model/task/__init__.py +++ b/deepmd/pd/model/task/__init__.py @@ -4,19 +4,13 @@ ) from .ener import ( EnergyFittingNet, - EnergyFittingNetDirect, ) from .fitting import ( Fitting, ) -from .type_predict import ( - TypePredictNet, -) __all__ = [ "EnergyFittingNet", - "EnergyFittingNetDirect", "Fitting", "BaseFitting", - "TypePredictNet", ] diff --git a/deepmd/pd/model/task/ener.py b/deepmd/pd/model/task/ener.py index 24f563f799..5baef6ce7c 100644 --- a/deepmd/pd/model/task/ener.py +++ b/deepmd/pd/model/task/ener.py @@ -6,17 +6,8 @@ Union, ) -import numpy as np import paddle -from deepmd.dpmodel import ( - FittingOutputDef, - OutputVariableDef, - fitting_check_output, -) -from deepmd.pd.model.network.network import ( - ResidualDeep, -) from deepmd.pd.model.task.fitting import ( Fitting, GeneralFitting, @@ -93,165 +84,3 @@ def serialize(self) -> dict: # make jit happy with paddle 2.0.0 exclude_types: list[int] - - -@Fitting.register("direct_force") -@Fitting.register("direct_force_ener") -@fitting_check_output -class EnergyFittingNetDirect(Fitting): - def __init__( - self, - ntypes, - dim_descrpt, - neuron, - bias_atom_e=None, - out_dim=1, - resnet_dt=True, - use_tebd=True, - return_energy=False, - **kwargs, - ): - """Construct a fitting net for energy. - - Args: - - ntypes: Element count. - - embedding_width: Embedding width per atom. - - neuron: Number of neurons in each hidden layers of the fitting net. - - bias_atom_e: Average enery per atom for each element. - - resnet_dt: Using time-step in the ResNet construction. 
- """ - super().__init__() - self.ntypes = ntypes - self.dim_descrpt = dim_descrpt - self.use_tebd = use_tebd - self.out_dim = out_dim - if bias_atom_e is None: - bias_atom_e = np.zeros([self.ntypes]) # pylint: disable=no-explicit-dtype - if not use_tebd: - assert self.ntypes == len(bias_atom_e), "Element count mismatches!" - bias_atom_e = paddle.to_tensor(bias_atom_e).to(device=env.DEVICE) # pylint: disable=no-explicit-dtype - self.register_buffer("bias_atom_e", bias_atom_e) - - filter_layers_dipole = [] - for type_i in range(self.ntypes): - one = ResidualDeep( - type_i, - dim_descrpt, - neuron, - 0.0, - out_dim=out_dim, - resnet_dt=resnet_dt, - ) - filter_layers_dipole.append(one) - self.filter_layers_dipole = paddle.nn.LayerList(filter_layers_dipole) - - self.return_energy = return_energy - filter_layers = [] - if self.return_energy: - for type_i in range(self.ntypes): - bias_type = 0.0 if self.use_tebd else bias_atom_e[type_i] - one = ResidualDeep( - type_i, dim_descrpt, neuron, bias_type, resnet_dt=resnet_dt - ) - filter_layers.append(one) - self.filter_layers = paddle.nn.LayerList(filter_layers) - - def output_def(self): - return FittingOutputDef( - [ - OutputVariableDef( - "energy", - [1], - reducible=True, - r_differentiable=False, - c_differentiable=False, - ), - OutputVariableDef( - "dforce", - [3], - reducible=False, - r_differentiable=False, - c_differentiable=False, - ), - ] - ) - - def serialize(self) -> dict: - raise NotImplementedError - - def deserialize(self) -> "EnergyFittingNetDirect": - raise NotImplementedError - - def change_type_map( - self, type_map: list[str], model_with_new_type_stat=None - ) -> None: - raise NotImplementedError - - def get_type_map(self) -> list[str]: - raise NotImplementedError - - def forward( - self, - inputs: paddle.Tensor, - atype: paddle.Tensor, - gr: Optional[paddle.Tensor] = None, - g2: Optional[paddle.Tensor] = None, - h2: Optional[paddle.Tensor] = None, - fparam: Optional[paddle.Tensor] = None, - aparam: 
Optional[paddle.Tensor] = None, - ) -> tuple[paddle.Tensor, None]: - """Based on embedding net output, alculate total energy. - - Args: - - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.dim_descrpt]. - - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. - - Returns - ------- - - `paddle.Tensor`: Total energy with shape [nframes, natoms[0]]. - """ - nframes, nloc, _ = inputs.shape - if self.use_tebd: - # if atype_tebd is not None: - # inputs = paddle.concat([inputs, atype_tebd], axis=-1) - vec_out = self.filter_layers_dipole[0]( - inputs - ) # Shape is [nframes, nloc, m1] - assert list(vec_out.shape) == [nframes, nloc, self.out_dim] - # (nf x nloc) x 1 x od - vec_out = vec_out.reshape([-1, 1, self.out_dim]) - assert gr is not None - # (nf x nloc) x od x 3 - gr = gr.reshape([-1, self.out_dim, 3]) - vec_out = ( - paddle.bmm(vec_out, gr).squeeze(-2).reshape([nframes, nloc, 3]) - ) # Shape is [nframes, nloc, 3] - else: - vec_out = paddle.zeros_like(atype).unsqueeze(-1) # jit assertion - for type_i, filter_layer in enumerate(self.filter_layers_dipole): - mask = atype == type_i - vec_out_type = filter_layer(inputs) # Shape is [nframes, nloc, m1] - vec_out_type = vec_out_type * mask.unsqueeze(-1) - vec_out = vec_out + vec_out_type # Shape is [nframes, natoms[0], 1] - - outs = paddle.zeros_like(atype).unsqueeze(-1) # jit assertion - if self.return_energy: - if self.use_tebd: - atom_energy = self.filter_layers[0](inputs) + self.bias_atom_e[ - atype - ].unsqueeze(-1) - outs = ( - outs.astype(atom_energy.dtype) + atom_energy - ) # Shape is [nframes, natoms[0], 1] - else: - for type_i, filter_layer in enumerate(self.filter_layers): - mask = atype == type_i - atom_energy = filter_layer(inputs) - if not env.ENERGY_BIAS_TRAINABLE: - atom_energy = atom_energy + self.bias_atom_e[type_i] - atom_energy = atom_energy * mask.unsqueeze(-1) - outs = outs + atom_energy # Shape is [nframes, natoms[0], 1] - return { - "energy": 
outs.to(env.GLOBAL_PD_FLOAT_PRECISION), - "dforce": vec_out, - } diff --git a/deepmd/pd/model/task/type_predict.py b/deepmd/pd/model/task/type_predict.py deleted file mode 100644 index 241d4837d5..0000000000 --- a/deepmd/pd/model/task/type_predict.py +++ /dev/null @@ -1,47 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Optional, -) - -import paddle - -from deepmd.pd.model.network.network import ( - MaskLMHead, -) -from deepmd.pd.model.task import ( - Fitting, -) - - -class TypePredictNet(Fitting): - def __init__(self, feature_dim, ntypes, activation_function="gelu", **kwargs): - """Construct a type predict net. - - Args: - - feature_dim: Input dm. - - ntypes: Numer of types to predict. - - activation_function: Activate function. - """ - super().__init__() - self.feature_dim = feature_dim - self.ntypes = ntypes - self.lm_head = MaskLMHead( - embed_dim=self.feature_dim, - output_dim=ntypes, - activation_fn=activation_function, - weight=None, - ) - - def forward(self, features, masked_tokens: Optional[paddle.Tensor] = None): - """Calculate the predicted logits. - Args: - - features: Input features with shape [nframes, nloc, feature_dim]. - - masked_tokens: Input masked tokens with shape [nframes, nloc]. - - Returns - ------- - - logits: Predicted probs with shape [nframes, nloc, ntypes]. 
- """ - # [nframes, nloc, ntypes] - logits = self.lm_head(features, masked_tokens=masked_tokens) - return logits diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index 88ea108ce7..9bc001581c 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( + Final, Optional, Union, ) @@ -8,29 +9,17 @@ import torch import torch.nn as nn import torch.nn.functional as F +import torch.utils.checkpoint +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, +) from deepmd.pt.model.network.mlp import ( EmbeddingNet, ) from deepmd.pt.utils import ( env, ) -from deepmd.utils.version import ( - check_version_compatibility, -) - -try: - from typing import ( - Final, - ) -except ImportError: - from torch.jit import Final - -import torch.utils.checkpoint - -from deepmd.dpmodel.utils.type_embed import ( - get_econf_tebd, -) from deepmd.pt.utils.utils import ( ActivationFn, to_torch_tensor, @@ -38,6 +27,9 @@ from deepmd.utils.finetune import ( get_index_between_two_maps, ) +from deepmd.utils.version import ( + check_version_compatibility, +) def Tensor(*shape): diff --git a/source/tests/pd/model/test_null_input.py b/source/tests/pd/model/test_null_input.py new file mode 100644 index 0000000000..9bf0860265 --- /dev/null +++ b/source/tests/pd/model/test_null_input.py @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( + model_se_e2_a, +) + +dtype = paddle.float64 + + +class NullTest: + def test_nloc_1( + self, + ): + natoms = 1 + generator = paddle.seed(GLOBAL_SEED) + # paddle.seed(1000) + 
cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + # large box to exclude images + cell = (cell + cell.T) + 100.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0], dtype=paddle.int32).to(device=env.DEVICE) + test_keys = ["energy", "force", "virial"] + result = eval_model(self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype) + ret0 = {key: result[key].squeeze(0) for key in test_keys} + prec = 1e-10 + expect_e_shape = [1] + expect_f = paddle.zeros([natoms, 3], dtype=dtype).to(device=env.DEVICE) + expect_v = paddle.zeros([9], dtype=dtype).to(device=env.DEVICE) + self.assertEqual(list(ret0["energy"].shape), expect_e_shape) + self.assertFalse(np.isnan(to_numpy_array(ret0["energy"])[0])) + np.testing.assert_allclose( + ret0["force"].numpy(), expect_f.numpy(), rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0["virial"].numpy(), expect_v.numpy(), rtol=prec, atol=prec + ) + + def test_nloc_2_far( + self, + ): + natoms = 2 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + # large box to exclude images + cell = (cell + cell.T) + 3000.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([1, 3], dtype=dtype).to(device=env.DEVICE) + # 2 far-away atoms + coord = paddle.concat([coord, coord + 100.0], axis=0) + atype = paddle.to_tensor([0, 2], dtype=paddle.int32).to(device=env.DEVICE) + test_keys = ["energy", "force", "virial"] + result = eval_model(self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype) + ret0 = {key: result[key].squeeze(0) for key in test_keys} + prec = 1e-10 + expect_e_shape = [1] + expect_f = paddle.zeros([natoms, 3], dtype=dtype).to(device=env.DEVICE) + expect_v = paddle.zeros([9], dtype=dtype).to(device=env.DEVICE) + self.assertEqual(list(ret0["energy"].shape), expect_e_shape) + 
self.assertFalse(np.isnan(to_numpy_array(ret0["energy"])[0])) + np.testing.assert_allclose( + ret0["force"].numpy(), expect_f.numpy(), rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0["virial"].numpy(), expect_v.numpy(), rtol=prec, atol=prec + ) + + +class TestEnergyModelSeA(unittest.TestCase, NullTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_smooth.py b/source/tests/pd/model/test_smooth.py new file mode 100644 index 0000000000..7f77a6f188 --- /dev/null +++ b/source/tests/pd/model/test_smooth.py @@ -0,0 +1,172 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_se_e2_a, +) + +dtype = paddle.float64 + + +class SmoothTest: + def test( + self, + ): + generator = paddle.seed(GLOBAL_SEED) + # displacement of atoms + epsilon = 1e-5 if self.epsilon is None else self.epsilon + # required prec. relative prec is not checked. 
+ rprec = 0.0 + aprec = 1e-5 if self.aprec is None else self.aprec + + natoms = 10 + cell = 8.6 * paddle.eye(3, dtype=dtype).to(device=env.DEVICE) + atype0 = paddle.arange(3, dtype=dtype).to(device=env.DEVICE) + atype1 = paddle.randint(0, 3, [natoms - 3]).to( + device=env.DEVICE, dtype=atype0.dtype + ) + atype = paddle.concat([atype0, atype1]).reshape([natoms]) + coord0 = ( + paddle.to_tensor( + [ + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + ], + dtype=dtype, + ) + .reshape([-1, 3]) + .to(device=env.DEVICE) + ) + coord1 = paddle.rand( + [natoms - coord0.shape[0], 3], + dtype=dtype, + ).to(device=env.DEVICE) + coord1 = paddle.matmul(coord1, cell) + coord = paddle.concat([coord0, coord1], axis=0) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord0 = paddle.clone(coord) + coord1 = paddle.clone(coord) + coord1[1][0] += epsilon + coord2 = paddle.clone(coord) + coord2[2][1] += epsilon + coord3 = paddle.clone(coord) + coord3[1][0] += epsilon + coord3[2][1] += epsilon + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + + result_0 = eval_model( + self.model, + coord0.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord1.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + result_2 = eval_model( + self.model, + coord2.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret2 = {key: result_2[key].squeeze(0) for key in test_keys} + result_3 = eval_model( + self.model, + coord3.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret3 = {key: result_3[key].squeeze(0) for key in test_keys} + + def compare(ret0, 
ret1): + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=rprec, atol=aprec + ) + elif key in ["force", "force_mag"]: + # plus 1. to avoid the divided-by-zero issue + np.testing.assert_allclose( + (1.0 + ret0[key]).numpy(), + (1.0 + ret1[key]).numpy(), + rtol=rprec, + atol=aprec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + (1.0 + ret0[key]).numpy(), + (1.0 + ret1[key]).numpy(), + rtol=rprec, + atol=aprec, + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + compare(ret0, ret1) + compare(ret1, ret2) + compare(ret0, ret3) + + +class TestEnergyModelSeA(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +# class TestEnergyFoo(unittest.TestCase): +# def test(self): +# model_params = model_dpau +# self.model = EnergyModelDPAUni(model_params).to(env.DEVICE) + +# natoms = 5 +# cell = paddle.rand([3, 3], dtype=dtype) +# cell = (cell + cell.T) + 5. 
* paddle.eye(3) +# coord = paddle.rand([natoms, 3], dtype=dtype) +# coord = paddle.matmul(coord, cell) +# atype = paddle.to_tensor([0, 0, 0, 1, 1]) +# idx_perm = [1, 0, 4, 3, 2] +# ret0 = infer_model(self.model, coord, cell, atype, type_split=True) + + +if __name__ == "__main__": + unittest.main() From 71a3c0a1c05dc394e5d21abf11710b6d694d2e05 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 05:05:27 +0000 Subject: [PATCH 28/58] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pd/model/atomic_model/energy_atomic_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deepmd/pd/model/atomic_model/energy_atomic_model.py b/deepmd/pd/model/atomic_model/energy_atomic_model.py index 40da920ba9..708ec9db7f 100644 --- a/deepmd/pd/model/atomic_model/energy_atomic_model.py +++ b/deepmd/pd/model/atomic_model/energy_atomic_model.py @@ -11,8 +11,7 @@ class DPEnergyAtomicModel(DPAtomicModel): def __init__(self, descriptor, fitting, type_map, **kwargs): - assert ( - isinstance(fitting, EnergyFittingNet) - or isinstance(fitting, InvarFitting) + assert isinstance(fitting, EnergyFittingNet) or isinstance( + fitting, InvarFitting ) super().__init__(descriptor, fitting, type_map, **kwargs) From ede5047bfa56edbb20a048c48e70896ea606a8b7 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 8 Nov 2024 13:06:48 +0800 Subject: [PATCH 29/58] remove redundant annotations --- source/tests/pd/test_decomp.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/source/tests/pd/test_decomp.py b/source/tests/pd/test_decomp.py index 1716fbacc7..d8439ad994 100644 --- a/source/tests/pd/test_decomp.py +++ b/source/tests/pd/test_decomp.py @@ -80,16 +80,6 @@ def test_scatter_reduce_decomp(self): 1e-7, ) - # raw_output = raw_api(raw_input, raw_index, raw_values, axis=axis, reduce="mean") - # decomp_output = 
decomp_api(raw_input, axis, raw_index, src=raw_values, reduce="mean") - - # np.testing.assert_allclose( - # raw_output.numpy(), - # decomp_output.numpy(), - # 1e-5, - # 1e-8, - # ) - def test_sec(self): shape = [10, 3] length = shape[0] From b7a8cec66a3eb3ae4006db2698790fa9a2054b67 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 8 Nov 2024 21:29:19 +0800 Subject: [PATCH 30/58] add nvtx profiler code in training, which is more accurate and detailed --- deepmd/pd/train/training.py | 82 ++++++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 29 deletions(-) diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 9b5fc1fbb4..af73481ec5 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -2,6 +2,9 @@ import functools import logging import time +from contextlib import ( + contextmanager, +) from copy import ( deepcopy, ) @@ -18,6 +21,9 @@ from paddle.distributed import ( fleet, ) +from paddle.framework import ( + core, +) from paddle.io import ( DataLoader, ) @@ -647,23 +653,15 @@ def run(self): ) writer = SummaryWriter(log_dir=self.tensorboard_log_dir) - if self.enable_profiler or self.profiling: - prof = paddle.profiler.profile( - schedule=paddle.profiler.schedule(wait=1, warmup=1, active=3, repeat=1), - on_trace_ready=paddle.profiler.tensorboard_trace_handler( - self.tensorboard_log_dir - ) - if self.enable_profiler - else None, - record_shapes=True, - with_stack=True, - ) - prof.start() + enable_profiling = self.enable_profiler or self.profiling + if enable_profiling: + core.nvprof_start() + core.nvprof_enable_record_event() def step(_step_id, task_key="Default"): # Paddle Profiler - if self.enable_profiler or self.profiling: - prof.step() + if enable_profiling: + core.nvprof_nvtx_push(f"Training step {_step_id}") self.wrapper.train() if isinstance(self.lr_exp, dict): _lr = self.lr_exp[task_key] @@ -685,19 +683,33 @@ def step(_step_id, task_key="Default"): pref_lr = 
_lr.start_lr else: pref_lr = cur_lr - model_pred, loss, more_loss = self.wrapper( - **input_dict, cur_lr=pref_lr, label=label_dict, task_key=task_key - ) - loss.backward() - if self.gradient_max_norm > 0.0: - grad_norm = paddle.nn.utils.clip_grad_norm_( - self.wrapper.parameters(), self.gradient_max_norm + with nvprof_context(enable_profiling, "Forward pass"): + model_pred, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=task_key, ) + + with nvprof_context(enable_profiling, "Backward pass"): + loss.backward() + + if self.gradient_max_norm > 0.0: + with nvprof_context(enable_profiling, "Gradient clip"): + grad_norm = paddle.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), self.gradient_max_norm + ) if not paddle.isfinite(grad_norm).all(): # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") - self.optimizer.step() + + with nvprof_context(enable_profiling, "Adam update"): + self.optimizer.step() + self.scheduler.step() + + if enable_profiling: + core.nvprof_nvtx_pop() else: raise ValueError(f"Not supported optimizer type '{self.opt_type}'") @@ -955,13 +967,12 @@ def log_loss_valid(_task_key="Default"): fout1.close() if self.enable_tensorboard: writer.close() - if self.enable_profiler or self.profiling: - prof.stop() - if self.profiling: - prof.export_chrome_trace(self.profiling_file) - log.info( - f"The profiling trace have been saved to: {self.profiling_file}" - ) + if enable_profiling: + core.nvprof_stop() + log.info( + "The nsys profiling trace have been saved to *.nsys-rep and *.sqlite " + "files, which can be viewd in NVIDIA Nsight Systems software" + ) def save_model(self, save_path: Path, lr=0.0, step=0): module = ( @@ -1199,3 +1210,16 @@ def model_change_out_bias( f"to {to_numpy_array(new_bias).reshape(-1)!s}." 
) return _model + + +@contextmanager +def nvprof_context(enable_profiler: bool, name: str): + if enable_profiler: + core.nvprof_nvtx_push(name) + + try: + yield + + finally: + if enable_profiler: + core.nvprof_nvtx_pop() From 416fec8cb1e5940cd4bbbff6ff5af0eb1dd5a3fa Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 12:36:58 +0800 Subject: [PATCH 31/58] update code as devel and fix typo --- deepmd/dpmodel/model/make_model.py | 2 +- deepmd/pd/entrypoints/main.py | 206 ++++++------ deepmd/pd/infer/inference.py | 2 - deepmd/pd/loss/loss.py | 25 +- deepmd/pd/model/__init__.py | 5 - .../model/atomic_model/base_atomic_model.py | 11 +- .../pd/model/atomic_model/dp_atomic_model.py | 6 +- deepmd/pd/model/backbone/__init__.py | 8 - deepmd/pd/model/backbone/backbone.py | 12 - deepmd/pd/model/descriptor/descriptor.py | 14 +- deepmd/pd/model/descriptor/se_a.py | 62 +++- deepmd/pd/model/model/dp_model.py | 12 +- deepmd/pd/model/model/frozen.py | 4 +- deepmd/pd/model/model/make_model.py | 49 ++- deepmd/pd/model/model/transform_output.py | 16 +- deepmd/pd/model/network/layernorm.py | 4 + deepmd/pd/model/task/ener.py | 1 - deepmd/pd/model/task/fitting.py | 31 +- deepmd/pd/model/task/invar_fitting.py | 16 +- deepmd/pd/train/training.py | 13 +- deepmd/pd/train/wrapper.py | 5 +- deepmd/pd/utils/env_mat_stat.py | 8 +- deepmd/pd/utils/learning_rate.py | 57 +--- deepmd/pd/utils/neighbor_stat.py | 4 +- deepmd/pd/utils/nlist.py | 19 +- deepmd/pd/utils/preprocess.py | 296 ------------------ deepmd/pd/utils/region.py | 18 +- deepmd/pd/utils/serialization.py | 27 +- deepmd/pd/utils/stat.py | 16 +- source/tests/consistent/common.py | 19 +- source/tests/consistent/model/test_ener.py | 7 +- source/tests/consistent/test_neighbor_stat.py | 5 + source/tests/pd/model/test_descriptor.py | 10 +- source/tests/pd/model/test_embedding_net.py | 14 +- source/tests/pd/model/test_jit.py | 2 +- source/tests/pd/test_lr.py | 4 +- 36 files changed, 374 insertions(+), 636 
deletions(-) delete mode 100644 deepmd/pd/model/backbone/__init__.py delete mode 100644 deepmd/pd/model/backbone/backbone.py diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index 95d97262df..f082916c9d 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -455,7 +455,7 @@ def format_nlist( Returns ------- - formated_nlist + formatted_nlist the formatted nlist. """ diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index 1d27bbe877..fef160c0b0 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -232,16 +232,27 @@ def get_backend_info(self) -> dict: } -def train(FLAGS): - log.info("Configuration path: %s", FLAGS.INPUT) +def train( + input_file: str, + init_model: Optional[str], + restart: Optional[str], + finetune: Optional[str], + init_frz_model: Optional[str], + model_branch: str, + skip_neighbor_stat: bool = False, + use_pretrain_script: bool = False, + force_load: bool = False, + output: str = "out.json", +): + log.info("Configuration path: %s", input_file) SummaryPrinter()() - with open(FLAGS.INPUT) as fin: + with open(input_file) as fin: config = json.load(fin) # ensure suffix, as in the command line help, we say "path prefix of checkpoint files" - if FLAGS.init_model is not None and not FLAGS.init_model.endswith(".pd"): - FLAGS.init_model += ".pd" - if FLAGS.restart is not None and not FLAGS.restart.endswith(".pd"): - FLAGS.restart += ".pd" + if init_model is not None and not init_model.endswith(".pd"): + init_model += ".pd" + if restart is not None and not restart.endswith(".pd"): + restart += ".pd" # update multitask config multi_task = "model_dict" in config["model"] @@ -255,24 +266,22 @@ def train(FLAGS): # update fine-tuning config finetune_links = None - if FLAGS.finetune is not None: + if finetune is not None: config["model"], finetune_links = get_finetune_rules( - FLAGS.finetune, + finetune, config["model"], - 
model_branch=FLAGS.model_branch, - change_model_params=FLAGS.use_pretrain_script, + model_branch=model_branch, + change_model_params=use_pretrain_script, ) # update init_model or init_frz_model config if necessary - if ( - FLAGS.init_model is not None or FLAGS.init_frz_model is not None - ) and FLAGS.use_pretrain_script: - if FLAGS.init_model is not None: - init_state_dict = paddle.load(FLAGS.init_model) + if (init_model is not None or init_frz_model is not None) and use_pretrain_script: + if init_model is not None: + init_state_dict = paddle.load(init_model) if "model" in init_state_dict: init_state_dict = init_state_dict["model"] config["model"] = init_state_dict["_extra_state"]["model_params"] else: - raise NotImplementedError("FLAGS.init_model can not be empty.") + raise NotImplementedError("init_frz_model is not supported yet") # argcheck config = update_deepmd_input(config, warning=True, dump="input_v2_compat.json") @@ -280,7 +289,7 @@ def train(FLAGS): # do neighbor stat min_nbor_dist = None - if not FLAGS.skip_neighbor_stat: + if not skip_neighbor_stat: log.info( "Calculate neighbor statistics... 
(add --skip-neighbor-stat to skip this step)" ) @@ -309,16 +318,16 @@ def train(FLAGS): ) ) - with open(FLAGS.output, "w") as fp: + with open(output, "w") as fp: json.dump(config, fp, indent=4) trainer = get_trainer( config, - FLAGS.init_model, - FLAGS.restart, - FLAGS.finetune, - FLAGS.force_load, - FLAGS.init_frz_model, + init_model, + restart, + finetune, + force_load, + init_frz_model, shared_links=shared_links, finetune_links=finetune_links, ) @@ -332,7 +341,11 @@ def train(FLAGS): trainer.run() -def freeze(FLAGS): +def freeze( + model: str, + output: str = "frozen_model.pth", + head: Optional[str] = None, +): paddle.set_flags( { "FLAGS_save_cf_stack_op": 1, @@ -340,7 +353,7 @@ def freeze(FLAGS): "FLAGS_enable_pir_api": 1, } ) - model = inference.Tester(FLAGS.model, head=FLAGS.head).model + model = inference.Tester(model, head=head).model model.eval() from paddle.static import ( InputSpec, @@ -362,80 +375,30 @@ def freeze(FLAGS): InputSpec([-1, -1, -1], dtype="int32", name="nlist"), ], ) - if FLAGS.output.endswith(".json"): - FLAGS.output = FLAGS.output[:-5] + if output.endswith(".json"): + output = output[:-5] paddle.jit.save( jit_model, - path=FLAGS.output, + path=output, skip_prune_program=True, ) log.info( - f"Paddle inference model has been exported to: {FLAGS.output}.json and {FLAGS.output}.pdiparams" + f"Paddle inference model has been exported to: {output}.json and {output}.pdiparams" ) -def show(FLAGS): - if FLAGS.INPUT.split(".")[-1] == "pd": - state_dict = paddle.load(FLAGS.INPUT) - if "model" in state_dict: - state_dict = state_dict["model"] - model_params = state_dict["_extra_state"]["model_params"] - else: - raise RuntimeError( - "The model provided must be a checkpoint file with a .pd extension" - ) - model_is_multi_task = "model_dict" in model_params - log.info("This is a multitask model") if model_is_multi_task else log.info( - "This is a singletask model" - ) - - if "model-branch" in FLAGS.ATTRIBUTES: - # The model must be multitask mode - if 
not model_is_multi_task: - raise RuntimeError( - "The 'model-branch' option requires a multitask model." - " The provided model does not meet this criterion." - ) - model_branches = list(model_params["model_dict"].keys()) - model_branches += ["RANDOM"] - log.info( - f"Available model branches are {model_branches}, " - f"where 'RANDOM' means using a randomly initialized fitting net." - ) - if "type-map" in FLAGS.ATTRIBUTES: - if model_is_multi_task: - model_branches = list(model_params["model_dict"].keys()) - for branch in model_branches: - type_map = model_params["model_dict"][branch]["type_map"] - log.info(f"The type_map of branch {branch} is {type_map}") - else: - type_map = model_params["type_map"] - log.info(f"The type_map is {type_map}") - if "descriptor" in FLAGS.ATTRIBUTES: - if model_is_multi_task: - model_branches = list(model_params["model_dict"].keys()) - for branch in model_branches: - descriptor = model_params["model_dict"][branch]["descriptor"] - log.info(f"The descriptor parameter of branch {branch} is {descriptor}") - else: - descriptor = model_params["descriptor"] - log.info(f"The descriptor parameter is {descriptor}") - if "fitting-net" in FLAGS.ATTRIBUTES: - if model_is_multi_task: - model_branches = list(model_params["model_dict"].keys()) - for branch in model_branches: - fitting_net = model_params["model_dict"][branch]["fitting_net"] - log.info( - f"The fitting_net parameter of branch {branch} is {fitting_net}" - ) - else: - fitting_net = model_params["fitting_net"] - log.info(f"The fitting_net parameter is {fitting_net}") - - -def change_bias(FLAGS): - if FLAGS.INPUT.endswith(".pd"): - old_state_dict = paddle.load(FLAGS.INPUT) +def change_bias( + input_file: str, + mode: str = "change", + bias_value: Optional[list] = None, + datafile: Optional[str] = None, + system: str = ".", + numb_batch: int = 0, + model_branch: Optional[str] = None, + output: Optional[str] = None, +): + if input_file.endswith(".pd"): + old_state_dict = 
paddle.load(input_file) model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) model_params = model_state_dict["_extra_state"]["model_params"] else: @@ -444,10 +407,7 @@ def change_bias(FLAGS): "Please provided a checkpoint file with a .pd extension" ) multi_task = "model_dict" in model_params - model_branch = FLAGS.model_branch - bias_adjust_mode = ( - "change-by-statistic" if FLAGS.mode == "change" else "set-by-statistic" - ) + bias_adjust_mode = "change-by-statistic" if mode == "change" else "set-by-statistic" if multi_task: assert ( model_branch is not None @@ -464,23 +424,23 @@ def change_bias(FLAGS): else model_params["model_dict"][model_branch]["type_map"] ) model_to_change = model if not multi_task else model[model_branch] - if FLAGS.INPUT.endswith(".pd"): + if input_file.endswith(".pd"): wrapper = ModelWrapper(model) wrapper.set_state_dict(old_state_dict["model"]) else: raise NotImplementedError("Only support .pd file") - if FLAGS.bias_value is not None: + if bias_value is not None: # use user-defined bias assert model_to_change.model_type in [ "ener" ], "User-defined bias is only available for energy model!" assert ( - len(FLAGS.bias_value) == len(type_map) + len(bias_value) == len(type_map) ), f"The number of elements in the bias should be the same as that in the type_map: {type_map}." 
old_bias = model_to_change.get_out_bias() bias_to_set = paddle.to_tensor( - FLAGS.bias_value, dtype=old_bias.dtype, place=old_bias.place + bias_value, dtype=old_bias.dtype, place=old_bias.place ).reshape(old_bias.shape) model_to_change.set_out_bias(bias_to_set) log.info( @@ -491,11 +451,11 @@ def change_bias(FLAGS): updated_model = model_to_change else: # calculate bias on given systems - if FLAGS.datafile is not None: - with open(FLAGS.datafile) as datalist: + if datafile is not None: + with open(datafile) as datalist: all_sys = datalist.read().splitlines() else: - all_sys = expand_sys_str(FLAGS.system) + all_sys = expand_sys_str(system) data_systems = process_systems(all_sys) data_single = DpLoaderSet( data_systems, @@ -508,7 +468,7 @@ def change_bias(FLAGS): data_requirement = mock_loss.label_requirement data_requirement += training.get_additional_data_requirement(model_to_change) data_single.add_data_requirement(data_requirement) - nbatches = FLAGS.numb_batch if FLAGS.numb_batch != 0 else float("inf") + nbatches = numb_batch if numb_batch != 0 else float("inf") sampled_data = make_stat_input( data_single.systems, data_single.dataloaders, @@ -523,11 +483,9 @@ def change_bias(FLAGS): else: model[model_branch] = updated_model - if FLAGS.INPUT.endswith(".pd"): + if input_file.endswith(".pd"): output_path = ( - FLAGS.output - if FLAGS.output is not None - else FLAGS.INPUT.replace(".pd", "_updated.pd") + output if output is not None else input_file.replace(".pd", "_updated.pd") ) wrapper = ModelWrapper(model) if "model" in old_state_dict: @@ -551,14 +509,25 @@ def main(args: Optional[Union[list[str], argparse.Namespace]] = None): set_log_handles( FLAGS.log_level, - Path(FLAGS.log_path) if FLAGS.log_path is not None else None, + Path(FLAGS.log_path) if FLAGS.log_path else None, mpi_log=None, ) log.debug("Log handles were successfully set") log.info("DeePMD version: %s", __version__) if FLAGS.command == "train": - train(FLAGS) + train( + input_file=FLAGS.INPUT, + 
init_model=FLAGS.init_model, + restart=FLAGS.restart, + finetune=FLAGS.finetune, + init_frz_model=FLAGS.init_frz_model, + model_branch=FLAGS.model_branch, + skip_neighbor_stat=FLAGS.skip_neighbor_stat, + use_pretrain_script=FLAGS.use_pretrain_script, + force_load=FLAGS.force_load, + output=FLAGS.output, + ) elif FLAGS.command == "freeze": if Path(FLAGS.checkpoint_folder).is_dir(): checkpoint_path = Path(FLAGS.checkpoint_folder) @@ -566,12 +535,19 @@ def main(args: Optional[Union[list[str], argparse.Namespace]] = None): FLAGS.model = str(checkpoint_path.joinpath(latest_ckpt_file)) else: FLAGS.model = FLAGS.checkpoint_folder - FLAGS.output = str(Path(FLAGS.output).with_suffix("")) - freeze(FLAGS) - elif FLAGS.command == "show": - show(FLAGS) + FLAGS.output = str(Path(FLAGS.output).with_suffix(".json")) + freeze(model=FLAGS.model, output=FLAGS.output, head=FLAGS.head) elif FLAGS.command == "change-bias": - change_bias(FLAGS) + change_bias( + input_file=FLAGS.INPUT, + mode=FLAGS.mode, + bias_value=FLAGS.bias_value, + datafile=FLAGS.datafile, + system=FLAGS.system, + numb_batch=FLAGS.numb_batch, + model_branch=FLAGS.model_branch, + output=FLAGS.output, + ) else: raise RuntimeError(f"Invalid command {FLAGS.command}!") diff --git a/deepmd/pd/infer/inference.py b/deepmd/pd/infer/inference.py index 1ebadd24c9..ae1b8e8516 100644 --- a/deepmd/pd/infer/inference.py +++ b/deepmd/pd/infer/inference.py @@ -17,8 +17,6 @@ JIT, ) -# if paddle.__version__.startswith("2"): -# import paddle._dynamo log = logging.getLogger(__name__) diff --git a/deepmd/pd/loss/loss.py b/deepmd/pd/loss/loss.py index c083996720..f825f9ff61 100644 --- a/deepmd/pd/loss/loss.py +++ b/deepmd/pd/loss/loss.py @@ -9,9 +9,12 @@ from deepmd.utils.data import ( DataRequirementItem, ) +from deepmd.utils.plugin import ( + make_plugin_registry, +) -class TaskLoss(paddle.nn.Layer, ABC): +class TaskLoss(paddle.nn.Layer, ABC, make_plugin_registry("loss")): def __init__(self, **kwargs): """Construct loss.""" 
super().__init__() @@ -38,3 +41,23 @@ def display_if_exist(loss: paddle.Tensor, find_property: float) -> paddle.Tensor whether the property is found """ return loss if bool(find_property) else paddle.to_tensor(float("nan")) + + @classmethod + def get_loss(cls, loss_params: dict) -> "TaskLoss": + """Get the loss module by the parameters. + + By default, all the parameters are directly passed to the constructor. + If not, override this method. + + Parameters + ---------- + loss_params : dict + The loss parameters + + Returns + ------- + TaskLoss + The loss module + """ + loss = cls(**loss_params) + return loss diff --git a/deepmd/pd/model/__init__.py b/deepmd/pd/model/__init__.py index 171d147114..6ceb116d85 100644 --- a/deepmd/pd/model/__init__.py +++ b/deepmd/pd/model/__init__.py @@ -1,6 +1 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from deepmd.utils.entry_point import ( - load_entry_point, -) - -load_entry_point("deepmd.pd") diff --git a/deepmd/pd/model/atomic_model/base_atomic_model.py b/deepmd/pd/model/atomic_model/base_atomic_model.py index 44553482c6..1100813fb4 100644 --- a/deepmd/pd/model/atomic_model/base_atomic_model.py +++ b/deepmd/pd/model/atomic_model/base_atomic_model.py @@ -8,6 +8,7 @@ Union, ) +import numpy as np import paddle from deepmd.dpmodel.atomic_model import ( @@ -67,7 +68,7 @@ class BaseAtomicModel(paddle.nn.Layer, BaseAtomicModel_): Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.array of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. 
""" @@ -77,7 +78,7 @@ def __init__( atom_exclude_types: list[int] = [], pair_exclude_types: list[tuple[int, int]] = [], rcond: Optional[float] = None, - preset_out_bias: Optional[dict[str, paddle.Tensor]] = None, + preset_out_bias: Optional[dict[str, np.ndarray]] = None, ): paddle.nn.Layer.__init__(self) BaseAtomicModel_.__init__(self) @@ -148,7 +149,7 @@ def make_atom_mask( self, atype: paddle.Tensor, ) -> paddle.Tensor: - """The atoms with type < 0 are treated as virutal atoms, + """The atoms with type < 0 are treated as virtual atoms, which serves as place-holders for multi-frame calculations with different number of atoms in different frames. @@ -160,7 +161,7 @@ def make_atom_mask( Returns ------- mask - True for real atoms and False for virutal atoms. + True for real atoms and False for virtual atoms. """ # supposed to be supported by all backends @@ -200,7 +201,7 @@ def forward_common_atomic( Parameters ---------- extended_coord - extended coodinates, shape: nf x (nall x 3) + extended coordinates, shape: nf x (nall x 3) extended_atype extended atom typs, shape: nf x nall for a type < 0 indicating the atomic is virtual. diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py index 45eb9ca1cb..47b881e0cc 100644 --- a/deepmd/pd/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -46,8 +46,6 @@ class DPAtomicModel(BaseAtomicModel): For example `type_map[1]` gives the name of the type 1. 
""" - eval_descriptor_list: list[paddle.Tensor] - def __init__( self, descriptor, @@ -114,6 +112,8 @@ def _string_to_array(s: str) -> list[int]: ) self.buffer_aparam_nall.name = "buffer_aparam_nall" + eval_descriptor_list: list[paddle.Tensor] + def set_eval_descriptor_hook(self, enable: bool) -> None: """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" self.enable_eval_descriptor_hook = enable @@ -220,7 +220,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region + coordinates in extended region extended_atype atomic type in extended region nlist diff --git a/deepmd/pd/model/backbone/__init__.py b/deepmd/pd/model/backbone/__init__.py deleted file mode 100644 index f7948285cf..0000000000 --- a/deepmd/pd/model/backbone/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from .backbone import ( - BackBone, -) - -__all__ = [ - "BackBone", -] diff --git a/deepmd/pd/model/backbone/backbone.py b/deepmd/pd/model/backbone/backbone.py deleted file mode 100644 index f37346a44f..0000000000 --- a/deepmd/pd/model/backbone/backbone.py +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import paddle - - -class BackBone(paddle.nn.Layer): - def __init__(self, **kwargs): - """BackBone base method.""" - super().__init__() - - def forward(self, **kwargs): - """Calculate backBone.""" - raise NotImplementedError diff --git a/deepmd/pd/model/descriptor/descriptor.py b/deepmd/pd/model/descriptor/descriptor.py index 3d24d2f967..36de5b1948 100644 --- a/deepmd/pd/model/descriptor/descriptor.py +++ b/deepmd/pd/model/descriptor/descriptor.py @@ -105,11 +105,11 @@ def compute_input_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. 
Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. path : Optional[DPPath] @@ -126,7 +126,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -145,11 +145,11 @@ def share_params(self, base_class, shared_level, resume=False): paddle.assign( paddle.to_tensor(mean).to(device=env.DEVICE), base_class.mean, - ) # pylint: disable=no-explicit-dtype + ) paddle.assign( paddle.to_tensor(stddev).to(device=env.DEVICE), base_class.stddev, - ) # pylint: disable=no-explicit-dtype + ) # must share, even if not do stat self.mean = base_class.mean self.stddev = base_class.stddev @@ -196,7 +196,7 @@ def extend_descrpt_stat(des, type_map, des_with_stat=None): ---------- des : DescriptorBlock The descriptor block to be extended. - type_map : List[str] + type_map : list[str] The name of each type of atoms to be extended. des_with_stat : DescriptorBlock, Optional The descriptor block has additional statistics of types from newly provided `type_map`. 
diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 124fa3a88f..216d32783e 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -48,7 +48,7 @@ Final, ) except ImportError: - from paddle.jit import Final + pass from deepmd.dpmodel.utils import EnvMat as DPEnvMat from deepmd.pd.model.network.mlp import ( @@ -93,6 +93,7 @@ def __init__( raise NotImplementedError("old implementation of spin is not supported.") super().__init__() self.type_map = type_map + self.compress = False self.sea = DescrptBlockSeA( rcut, rcut_smth, @@ -164,7 +165,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -342,7 +343,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -366,6 +367,10 @@ def update_sel( class DescrptBlockSeA(DescriptorBlock): ndescrpt: Final[int] __constants__: ClassVar[list] = ["ndescrpt"] + lower: dict[str, int] + upper: dict[str, int] + table_data: dict[str, paddle.Tensor] + table_config: list[Union[int, float]] def __init__( self, @@ -395,8 +400,8 @@ def __init__( - axis_neuron: Number of columns of the sub-matrix of the embedding matrix. 
""" super().__init__() - self.rcut = rcut - self.rcut_smth = rcut_smth + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) self.neuron = neuron self.filter_neuron = self.neuron self.axis_neuron = axis_neuron @@ -425,6 +430,13 @@ def __init__( self.register_buffer("mean", mean) self.register_buffer("stddev", stddev) + # add for compression + self.compress = False + self.lower = {} + self.upper = {} + self.table_data = {} + self.table_config = [] + ndim = 1 if self.type_one_side else 2 filter_layers = NetworkCollection( ndim=ndim, ntypes=len(sel), network_type="embedding_network" @@ -443,6 +455,7 @@ def __init__( self.filter_layers = filter_layers self.stats = None # set trainable + self.trainable = trainable for param in self.parameters(): param.stop_gradient = not trainable @@ -470,6 +483,10 @@ def get_dim_out(self) -> int: """Returns the output dimension.""" return self.dim_out + def get_dim_rot_mat_1(self) -> int: + """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" + return self.filter_neuron[-1] + def get_dim_emb(self) -> int: """Returns the output dimension.""" return self.neuron[-1] @@ -574,6 +591,19 @@ def reinit_exclude( self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + self.compress = True + self.table_data = table_data + self.table_config = table_config + self.lower = lower + self.upper = upper + def forward( self, nlist: paddle.Tensor, @@ -609,7 +639,6 @@ def forward( protection=self.env_protection, ) - assert self.filter_layers is not None dmatrix = dmatrix.reshape([-1, self.nnei, 4]) dmatrix = dmatrix.astype(self.prec) nfnl = dmatrix.shape[0] @@ -623,6 +652,7 @@ def forward( for embedding_idx, ll in enumerate(self.filter_layers.networks): if self.type_one_side: ii = embedding_idx + ti = -1 # paddle.jit is not happy with slice(None) # ti_mask = 
paddle.ones(nfnl, dtype=paddle.bool, device=dmatrix.place) # applying a mask seems to cause performance degradation @@ -642,17 +672,23 @@ def forward( rr = dmatrix[ti_mask, self.sec[ii] : self.sec[ii + 1], :] else: rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] - if rr.numel() > 0: - rr = rr * mm.unsqueeze(2).astype(rr.dtype) - ss = rr[:, :, :1] + rr = rr * mm[:, :, None].astype(rr.dtype) + ss = rr[:, :, :1] + + if self.compress: + raise NotImplementedError( + "Compressed environment is not implemented yet." + ) + else: # nfnl x nt x ng gg = ll.forward(ss) # nfnl x 4 x ng gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) - if ti_mask is not None: - xyz_scatter[ti_mask] += gr - else: - xyz_scatter += gr + + if ti_mask is not None: + xyz_scatter[ti_mask] += gr + else: + xyz_scatter += gr xyz_scatter /= self.nnei xyz_scatter_1 = xyz_scatter.transpose([0, 2, 1]) diff --git a/deepmd/pd/model/model/dp_model.py b/deepmd/pd/model/model/dp_model.py index 1e1cee6826..e014be5b68 100644 --- a/deepmd/pd/model/model/dp_model.py +++ b/deepmd/pd/model/model/dp_model.py @@ -3,6 +3,8 @@ Optional, ) +import paddle + from deepmd.pd.model.descriptor.base_descriptor import ( BaseDescriptor, ) @@ -26,7 +28,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -52,3 +54,11 @@ def get_fitting_net(self): def get_descriptor(self): """Get the descriptor.""" return self.atomic_model.descriptor + + def set_eval_descriptor_hook(self, enable: bool) -> None: + """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" + self.atomic_model.set_eval_descriptor_hook(enable) + + def eval_descriptor(self) -> paddle.Tensor: + """Evaluate the descriptor.""" + return self.atomic_model.eval_descriptor() diff --git a/deepmd/pd/model/model/frozen.py b/deepmd/pd/model/model/frozen.py index 
209cfca9c8..e8128c6bd1 100644 --- a/deepmd/pd/model/model/frozen.py +++ b/deepmd/pd/model/model/frozen.py @@ -34,7 +34,7 @@ def __init__(self, model_file: str, **kwargs): self.model = paddle.jit.load(model_file.split(".json")[0]) else: raise NotImplementedError( - "Only support .json file, " f"but received {model_file}" + f"Only support .json file, but received {model_file}" ) def fitting_output_def(self) -> FittingOutputDef: @@ -162,7 +162,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index 258ba5d2fc..67b46d4d87 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -100,7 +100,34 @@ def model_output_type(self) -> list[str]: vars.append(kk) return vars - # cannot use the name forward. paddle script does not work + def enable_compression( + self, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call atomic_model enable_compression(). + + Parameters + ---------- + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.atomic_model.enable_compression( + self.get_min_nbor_dist(), + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_common( self, coord, @@ -131,7 +158,7 @@ def forward_common( Returns ------- ret_dict - The result dict of type Dict[str,paddle.Tensor]. + The result dict of type dict[str,paddle.Tensor]. The keys are defined by the `ModelOutputDef`. 
""" @@ -185,11 +212,11 @@ def change_out_bias( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. bias_adjust_mode : str @@ -223,7 +250,7 @@ def forward_common_lower( Parameters ---------- extended_coord - coodinates in extended region. nf x (nall x 3) + coordinates in extended region. nf x (nall x 3) extended_atype atomic type in extended region. nf x nall nlist @@ -364,7 +391,7 @@ def format_nlist( the `nlist` is pad with -1. 3. If the number of neighbors in the `nlist` is larger than sum(self.sel), - the nearest sum(sel) neighbors will be preseved. + the nearest sum(sel) neighbors will be preserved. Known limitations: @@ -374,7 +401,7 @@ def format_nlist( Parameters ---------- extended_coord - coodinates in extended region. nf x nall x 3 + coordinates in extended region. nf x nall x 3 extended_atype atomic type in extended region. nf x nall nlist @@ -384,8 +411,8 @@ def format_nlist( Returns ------- - formated_nlist - the formated nlist. + formatted_nlist + the formatted nlist. 
""" mixed_types = self.mixed_types() @@ -419,7 +446,7 @@ def _format_nlist( * paddle.ones( [n_nf, n_nloc, nnei - n_nnei], dtype=nlist.dtype, - ), + ).to(nlist.place), ], axis=-1, ) diff --git a/deepmd/pd/model/model/transform_output.py b/deepmd/pd/model/model/transform_output.py index 148258d8f2..469bfd3168 100644 --- a/deepmd/pd/model/model/transform_output.py +++ b/deepmd/pd/model/model/transform_output.py @@ -26,8 +26,7 @@ def atomic_virial_corr( coord = coord.detach() ce = coord * atom_energy sumce0, sumce1, sumce2 = paddle.split(paddle.sum(ce, axis=1), [1, 1, 1], axis=-1) - faked_grad = paddle.ones_like(sumce0) - # lst = paddle.jit.annotate(List[Optional[paddle.Tensor]], [faked_grad]) + # faked_grad = paddle.ones_like(sumce0) extended_virial_corr0 = paddle.autograd.grad( [sumce0], [extended_coord], @@ -106,14 +105,6 @@ def get_leading_dims( return list(vshape[: (len(vshape) - len(vdef.shape))]) -def get_atom_axis( - vdef: paddle.Tensor, -): - """Get the axis of atoms.""" - atom_axis = -(len(vdef.shape) + 1) - return atom_axis - - def take_deriv( vv: paddle.Tensor, svv: paddle.Tensor, @@ -177,7 +168,10 @@ def fit_output_to_model_output( atom_axis = -(len(shap) + 1) if vdef.reducible: kk_redu = get_reduce_name(kk) - model_ret[kk_redu] = paddle.sum(vv.astype(redu_prec), axis=atom_axis) + if vdef.intensive: + model_ret[kk_redu] = paddle.mean(vv.astype(redu_prec), axis=atom_axis) + else: + model_ret[kk_redu] = paddle.sum(vv.astype(redu_prec), axis=atom_axis) if vdef.r_differentiable: kk_derv_r, kk_derv_c = get_deriv_name(kk) dr, dc = take_deriv( diff --git a/deepmd/pd/model/network/layernorm.py b/deepmd/pd/model/network/layernorm.py index 0d052cfb90..fdf2433ed2 100644 --- a/deepmd/pd/model/network/layernorm.py +++ b/deepmd/pd/model/network/layernorm.py @@ -99,6 +99,10 @@ def forward( yy: paddle.Tensor The output. 
""" + # mean = xx.mean(dim=-1, keepdim=True) + # variance = xx.var(dim=-1, unbiased=False, keepdim=True) + # The following operation is the same as above, but will not raise error when using jit model to inference. + # See https://github.com/pytorch/pytorch/issues/85792 if xx.numel() > 0: variance, mean = ( paddle.var(xx, axis=-1, unbiased=False, keepdim=True), diff --git a/deepmd/pd/model/task/ener.py b/deepmd/pd/model/task/ener.py index 5baef6ce7c..ed0cfac69d 100644 --- a/deepmd/pd/model/task/ener.py +++ b/deepmd/pd/model/task/ener.py @@ -82,5 +82,4 @@ def serialize(self) -> dict: "type": "ener", } - # make jit happy with paddle 2.0.0 exclude_types: list[int] diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index 63a6ff682e..9008ef8af3 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -59,7 +59,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -96,7 +96,7 @@ class GeneralFitting(Fitting): neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : paddle.Tensor, optional - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. numb_fparam : int @@ -121,11 +121,13 @@ class GeneralFitting(Fitting): Now this only supports setting all the parameters in the fitting net at one state. When in list[bool], the trainable will be True only if all the boolean parameters are True. 
remove_vaccum_contribution: list[bool], optional - Remove vaccum contribution before the bias is added. The list assigned each + Remove vacuum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same - length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + length as `ntypes` signaling if or not removing the vacuum contribution for the atom types in the list. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. + use_aparam_as_mask: bool + If True, the aparam will not be used in fitting net for embedding. """ def __init__( @@ -147,6 +149,7 @@ def __init__( trainable: Union[bool, list[bool]] = True, remove_vaccum_contribution: Optional[list[bool]] = None, type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, **kwargs, ): super().__init__() @@ -164,6 +167,7 @@ def __init__( self.rcond = rcond self.seed = seed self.type_map = type_map + self.use_aparam_as_mask = use_aparam_as_mask # order matters, should be place after the assignment of ntypes self.reinit_exclude(exclude_types) self.trainable = trainable @@ -206,7 +210,11 @@ def __init__( else: self.aparam_avg, self.aparam_inv_std = None, None - in_dim = self.dim_descrpt + self.numb_fparam + self.numb_aparam + in_dim = ( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + ) self.filter_layers = NetworkCollection( 1 if not self.mixed_types else 0, @@ -292,13 +300,12 @@ def serialize(self) -> dict: # "trainable": self.trainable , # "atom_ener": self.atom_ener , # "layer_name": self.layer_name , - # "use_aparam_as_mask": self.use_aparam_as_mask , # "spin": self.spin , ## NOTICE: not supported by far "tot_ener_zero": False, "trainable": [self.trainable] * (len(self.neuron) + 1), "layer_name": None, - "use_aparam_as_mask": False, + "use_aparam_as_mask": self.use_aparam_as_mask, "spin": 
None, } @@ -399,9 +406,9 @@ def _forward_common( xx = descriptor if self.remove_vaccum_contribution is not None: # TODO: compute the input for vaccm when remove_vaccum_contribution is set - # Idealy, the input for vaccum should be computed; + # Ideally, the input for vacuum should be computed; # we consider it as always zero for convenience. - # Needs a compute_input_stats for vaccum passed from the + # Needs a compute_input_stats for vacuum passed from the # descriptor. xx_zeros = paddle.zeros_like(xx) else: @@ -411,8 +418,8 @@ def _forward_common( if nd != self.dim_descrpt: raise ValueError( - "get an input descriptor of dim {nd}," - "which is not consistent with {self.dim_descrpt}." + f"get an input descriptor of dim {nd}," + f"which is not consistent with {self.dim_descrpt}." ) # check fparam dim, concate to input descriptor if self.numb_fparam > 0: @@ -440,7 +447,7 @@ def _forward_common( axis=-1, ) # check aparam dim, concate to input descriptor - if self.numb_aparam > 0: + if self.numb_aparam > 0 and not self.use_aparam_as_mask: assert aparam is not None, "aparam should not be None" assert self.aparam_avg is not None assert self.aparam_inv_std is not None diff --git a/deepmd/pd/model/task/invar_fitting.py b/deepmd/pd/model/task/invar_fitting.py index 5a6cad7c2d..46b776a022 100644 --- a/deepmd/pd/model/task/invar_fitting.py +++ b/deepmd/pd/model/task/invar_fitting.py @@ -47,7 +47,7 @@ class InvarFitting(GeneralFitting): Embedding width per atom. dim_out : int The output dimension of the fitting net. - neuron : List[int] + neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : paddle.Tensor, optional Average enery per atom for each element. @@ -68,16 +68,17 @@ class InvarFitting(GeneralFitting): The condition number for the regression of atomic energy. seed : int, optional Random seed. - exclude_types: List[int] + exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. 
- atom_ener: List[Optional[paddle.Tensor]], optional + atom_ener: list[Optional[paddle.Tensor]], optional Specifying atomic energy contribution in vacuum. The value is a list specifying the bias. the elements can be None or np.array of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. - type_map: List[str], Optional + The `set_davg_zero` key in the descriptor should be set. + type_map: list[str], Optional A list of strings. Give the name to each type of atoms. - + use_aparam_as_mask: bool + If True, the aparam will not be used in fitting net for embedding. """ def __init__( @@ -99,6 +100,7 @@ def __init__( exclude_types: list[int] = [], atom_ener: Optional[list[Optional[paddle.Tensor]]] = None, type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, **kwargs, ): self.dim_out = dim_out @@ -122,6 +124,7 @@ def __init__( if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0 else [x is not None for x in atom_ener], type_map=type_map, + use_aparam_as_mask=use_aparam_as_mask, **kwargs, ) @@ -177,5 +180,4 @@ def forward( """ return self._forward_common(descriptor, atype, gr, g2, h2, fparam, aparam) - # make jit happy with paddle 2.0.0 exclude_types: list[int] diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index af73481ec5..08dfa79e81 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -75,10 +75,6 @@ DPH5Path, ) -# if paddle.__version__.startswith("2"): -# import paddle._dynamo - - log = logging.getLogger(__name__) @@ -974,16 +970,19 @@ def log_loss_valid(_task_key="Default"): "files, which can be viewd in NVIDIA Nsight Systems software" ) - def save_model(self, save_path: Path, lr=0.0, step=0): + def save_model(self, save_path, lr=0.0, step=0): module = ( self.wrapper.module if dist.is_available() and dist.is_initialized() else self.wrapper ) - module.train_infos["lr"] = lr + 
module.train_infos["lr"] = float(lr) module.train_infos["step"] = step + optim_state_dict = deepcopy(self.optimizer.state_dict()) + for item in optim_state_dict["param_groups"]: + item["lr"] = float(item["lr"]) paddle.save( - {"model": module.state_dict(), "optimizer": self.optimizer.state_dict()}, + {"model": module.state_dict(), "optimizer": optim_state_dict}, str(save_path), ) checkpoint_dir = save_path.parent diff --git a/deepmd/pd/train/wrapper.py b/deepmd/pd/train/wrapper.py index 7c07cbf675..da8c284dd7 100644 --- a/deepmd/pd/train/wrapper.py +++ b/deepmd/pd/train/wrapper.py @@ -15,9 +15,6 @@ _StateDict = Union[dict[str, paddle.Tensor], OrderedDict[str, paddle.Tensor]] -# if paddle.__version__.startswith("2"): -# import paddle._dynamo - log = logging.getLogger(__name__) @@ -71,7 +68,7 @@ def share_params(self, shared_links, resume=False): """ Share the parameters of classes following rules defined in shared_links during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ supported_types = ["descriptor", "fitting_net"] for shared_item in shared_links: diff --git a/deepmd/pd/utils/env_mat_stat.py b/deepmd/pd/utils/env_mat_stat.py index 1cc67ecfee..a37a9672f9 100644 --- a/deepmd/pd/utils/env_mat_stat.py +++ b/deepmd/pd/utils/env_mat_stat.py @@ -47,7 +47,7 @@ def compute_stat(self, env_mat: dict[str, paddle.Tensor]) -> dict[str, StatItem] Returns ------- - Dict[str, StatItem] + dict[str, StatItem] The statistics of the environment matrix. """ stats = {} @@ -63,7 +63,7 @@ def compute_stat(self, env_mat: dict[str, paddle.Tensor]) -> dict[str, StatItem] class EnvMatStatSe(EnvMatStat): - """Environmental matrix statistics for the se_a/se_r environemntal matrix. + """Environmental matrix statistics for the se_a/se_r environmental matrix. 
Parameters ---------- @@ -85,12 +85,12 @@ def iter( Parameters ---------- - data : List[Dict[str, Union[paddle.Tensor, List[Tuple[int, int]]]]] + data : list[dict[str, Union[paddle.Tensor, list[tuple[int, int]]]]] The data. Yields ------ - Dict[str, StatItem] + dict[str, StatItem] The statistics of the environment matrix. """ zero_mean = paddle.zeros( diff --git a/deepmd/pd/utils/learning_rate.py b/deepmd/pd/utils/learning_rate.py index 94c657abd4..3502434bc0 100644 --- a/deepmd/pd/utils/learning_rate.py +++ b/deepmd/pd/utils/learning_rate.py @@ -1,53 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import numpy as np +from deepmd.dpmodel.utils.learning_rate import ( + LearningRateExp, +) - -class LearningRateExp: - def __init__( - self, - start_lr, - stop_lr, - decay_steps, - stop_steps, - decay_rate=None, - **kwargs, - ): - """ - Construct an exponential-decayed learning rate. - - Parameters - ---------- - start_lr - The learning rate at the start of the training. - stop_lr - The desired learning rate at the end of the training. - When decay_rate is explicitly set, this value will serve as - the minimum learning rate during training. In other words, - if the learning rate decays below stop_lr, stop_lr will be applied instead. - decay_steps - The learning rate is decaying every this number of training steps. - stop_steps - The total training steps for learning rate scheduler. - decay_rate - The decay rate for the learning rate. - If provided, the decay rate will be set instead of - calculating it through interpolation between start_lr and stop_lr. 
- """ - self.start_lr = start_lr - default_ds = 100 if stop_steps // 10 > 100 else stop_steps // 100 + 1 - self.decay_steps = decay_steps - if self.decay_steps >= stop_steps: - self.decay_steps = default_ds - self.decay_rate = np.exp( - np.log(stop_lr / self.start_lr) / (stop_steps / self.decay_steps) - ) - if decay_rate is not None: - self.decay_rate = decay_rate - self.min_lr = stop_lr - - def value(self, step): - """Get the learning rate at the given step.""" - step_lr = self.start_lr * np.power(self.decay_rate, step // self.decay_steps) - if step_lr < self.min_lr: - step_lr = self.min_lr - return step_lr +__all__ = [ + "LearningRateExp", +] diff --git a/deepmd/pd/utils/neighbor_stat.py b/deepmd/pd/utils/neighbor_stat.py index a1e60459ca..af39161e98 100644 --- a/deepmd/pd/utils/neighbor_stat.py +++ b/deepmd/pd/utils/neighbor_stat.py @@ -25,7 +25,7 @@ class NeighborStatOP(paddle.nn.Layer): - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information. Parameters ---------- @@ -44,7 +44,7 @@ def __init__( mixed_types: bool, ) -> None: super().__init__() - self.rcut = rcut + self.rcut = float(rcut) self.ntypes = ntypes self.mixed_types = mixed_types diff --git a/deepmd/pd/utils/nlist.py b/deepmd/pd/utils/nlist.py index 851ff5293d..44924ce07d 100644 --- a/deepmd/pd/utils/nlist.py +++ b/deepmd/pd/utils/nlist.py @@ -57,7 +57,7 @@ def build_neighbor_list( sel: Union[int, list[int]], distinguish_types: bool = True, ) -> paddle.Tensor: - """Build neightbor list for a single frame. keeps nsel neighbors. + """Build neighbor list for a single frame. keeps nsel neighbors. Parameters ---------- @@ -70,7 +70,7 @@ def build_neighbor_list( number of local atoms. rcut : float cut-off radius - sel : int or List[int] + sel : int or list[int] maximal number of neighbors (of each type). 
if distinguish_types==True, nsel should be list and the length of nsel should be equal to number of @@ -126,10 +126,10 @@ def build_neighbor_list( # nloc x (nall-1) rr = rr[:, :, 1:] nlist = nlist[:, :, 1:] - t = _trim_mask_distinguish_nlist( + + return _trim_mask_distinguish_nlist( is_vir, atype, rr, nlist, rcut, sel, distinguish_types ) - return t def _trim_mask_distinguish_nlist( @@ -158,7 +158,7 @@ def _trim_mask_distinguish_nlist( device=rr.place, dtype=rr.dtype ) + rcut, - ], # pylint: disable=no-explicit-dtype + ], axis=-1, ) nlist = paddle.concat( @@ -214,7 +214,7 @@ def build_directional_neighbor_list( if type < 0 the atom is treated as virtual atoms. rcut : float cut-off radius - sel : int or List[int] + sel : int or list[int] maximal number of neighbors (of each type). if distinguish_types==True, nsel should be list and the length of nsel should be equal to number of @@ -361,14 +361,14 @@ def build_multiple_neighbor_list( nlist : paddle.Tensor Neighbor list of shape [batch_size, nloc, nsel], the neighbors should be stored in an ascending order. - rcuts : List[float] + rcuts : list[float] list of cut-off radius in ascending order. - nsels : List[int] + nsels : list[int] maximal number of neighbors in ascending order. Returns ------- - nlist_dict : Dict[str, paddle.Tensor] + nlist_dict : dict[str, paddle.Tensor] A dict of nlists, key given by get_multiple_nlist_key(rc, nsel) value being the corresponding nlist. 
@@ -460,6 +460,7 @@ def extend_coord_with_ghosts( """ device = coord.place nf, nloc = atype.shape[:2] + # int64 for index aidx = paddle.tile(paddle.arange(nloc).to(device=device).unsqueeze(0), [nf, 1]) # pylint: disable=no-explicit-dtype if cell is None: nall = nloc diff --git a/deepmd/pd/utils/preprocess.py b/deepmd/pd/utils/preprocess.py index 052d9941f8..3e047c1b8b 100644 --- a/deepmd/pd/utils/preprocess.py +++ b/deepmd/pd/utils/preprocess.py @@ -1,240 +1,11 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging -from typing import ( - Union, -) import paddle -from deepmd.pd.utils import ( - decomp, - env, -) - log = logging.getLogger(__name__) -class Region3D: - def __init__(self, boxt): - """Construct a simulation box.""" - boxt = boxt.reshape([3, 3]) - self.boxt = boxt # convert physical coordinates to internal ones - self.rec_boxt = paddle.linalg.inv( - self.boxt - ) # convert internal coordinates to physical ones - - self.volume = paddle.linalg.det(self.boxt) # compute the volume - - # boxt = boxt.permute(1, 0) - c_yz = paddle.cross(boxt[1], boxt[2]) - # self._h2yz = self.volume / paddle.linalg.norm(c_yz) - self._h2yz = self.volume / decomp.norm(c_yz) - c_zx = paddle.cross(boxt[2], boxt[0]) - # self._h2zx = self.volume / paddle.linalg.norm(c_zx) - self._h2zx = self.volume / decomp.norm(c_zx) - c_xy = paddle.cross(boxt[0], boxt[1]) - # self._h2xy = self.volume / paddle.linalg.norm(c_xy) - self._h2xy = self.volume / decomp.norm(c_xy) - - def phys2inter(self, coord): - """Convert physical coordinates to internal ones.""" - return coord @ self.rec_boxt - - def inter2phys(self, coord): - """Convert internal coordinates to physical ones.""" - return coord @ self.boxt - - def get_face_distance(self): - """Return face distinces to each surface of YZ, ZX, XY.""" - return paddle.stack([self._h2yz, self._h2zx, self._h2xy]) - - -def normalize_coord(coord, region: Region3D, nloc: int): - """Move outer atoms into region by mirror. 
- - Args: - - coord: shape is [nloc*3] - """ - tmp_coord = coord.clone() - inter_cood = paddle.remainder(region.phys2inter(tmp_coord), 1.0) - tmp_coord = region.inter2phys(inter_cood) - return tmp_coord - - -def compute_serial_cid(cell_offset, ncell): - """Tell the sequential cell ID in its 3D space. - - Args: - - cell_offset: shape is [3] - - ncell: shape is [3] - """ - cell_offset[:, 0] *= ncell[1] * ncell[2] - cell_offset[:, 1] *= ncell[2] - return cell_offset.sum(-1) - - -def compute_pbc_shift(cell_offset, ncell): - """Tell shift count to move the atom into region.""" - shift = paddle.zeros_like(cell_offset) - shift = shift + (cell_offset < 0) * -( - paddle.floor(paddle.divide(cell_offset, ncell)) - ) - shift = shift + (cell_offset >= ncell) * -( - paddle.floor(paddle.divide((cell_offset - ncell), ncell)) + 1 - ) - assert paddle.all(cell_offset + shift * ncell >= 0) - assert paddle.all(cell_offset + shift * ncell < ncell) - return shift - - -def build_inside_clist(coord, region: Region3D, ncell): - """Build cell list on atoms inside region. 
- - Args: - - coord: shape is [nloc*3] - - ncell: shape is [3] - """ - loc_ncell = int(paddle.prod(ncell)) # num of local cells - nloc = coord.numel() // 3 # num of local atoms - inter_cell_size = 1.0 / ncell - - inter_cood = region.phys2inter(coord.reshape([-1, 3])) - cell_offset = paddle.floor(inter_cood / inter_cell_size).to(paddle.int64) - # numerical error brought by conversion from phys to inter back and force - # may lead to negative value - cell_offset[cell_offset < 0] = 0 - delta = cell_offset - ncell - a2c = compute_serial_cid(cell_offset, ncell) # cell id of atoms - arange = paddle.arange(0, loc_ncell, 1) # pylint: disable=no-explicit-dtype,no-explicit-device - cellid = a2c == arange.unsqueeze(-1) # one hot cellid - c2a = cellid.nonzero() - lst = [] - cnt = 0 - bincount = paddle.bincount(a2c, minlength=loc_ncell) - for i in range(loc_ncell): - n = bincount[i] - lst.append(c2a[cnt : cnt + n, 1]) - cnt += n - return a2c, lst - - -def append_neighbors(coord, region: Region3D, atype, rcut: float): - """Make ghost atoms who are valid neighbors. 
- - Args: - - coord: shape is [nloc*3] - - atype: shape is [nloc] - """ - to_face = region.get_face_distance() - - # compute num and size of local cells - ncell = paddle.floor(to_face / rcut).to(paddle.int64) - ncell[ncell == 0] = 1 - cell_size = to_face / ncell - ngcell = ( - paddle.floor(rcut / cell_size).to(paddle.int64) + 1 - ) # num of cells out of local, which contain ghost atoms - - # add ghost atoms - a2c, c2a = build_inside_clist(coord, region, ncell) - xi = paddle.arange(-ngcell[0], ncell[0] + ngcell[0], 1) # pylint: disable=no-explicit-dtype,no-explicit-device - yi = paddle.arange(-ngcell[1], ncell[1] + ngcell[1], 1) # pylint: disable=no-explicit-dtype,no-explicit-device - zi = paddle.arange(-ngcell[2], ncell[2] + ngcell[2], 1) # pylint: disable=no-explicit-dtype,no-explicit-device - xyz = xi.reshape([-1, 1, 1, 1]) * paddle.to_tensor([1, 0, 0], dtype=paddle.int64) # pylint: disable=no-explicit-device - xyz = xyz + yi.reshape([1, -1, 1, 1]) * paddle.to_tensor( - [0, 1, 0], dtype=paddle.int64 - ) # pylint: disable=no-explicit-device - xyz = xyz + zi.reshape([1, 1, -1, 1]) * paddle.to_tensor( - [0, 0, 1], dtype=paddle.int64 - ) # pylint: disable=no-explicit-device - xyz = xyz.reshape([-1, 3]) - mask_a = (xyz >= 0).all(axis=-1) - mask_b = (xyz < ncell).all(axis=-1) - mask = ~paddle.logical_and(mask_a, mask_b) - xyz = xyz[mask] # cell coord - shift = compute_pbc_shift(xyz, ncell) - coord_shift = region.inter2phys(shift.to(env.GLOBAL_PD_FLOAT_PRECISION)) - mirrored = shift * ncell + xyz - cid = compute_serial_cid(mirrored, ncell) - - n_atoms = coord.shape[0] - aid = [c2a[ci] + i * n_atoms for i, ci in enumerate(cid)] - aid = paddle.concat(aid) - tmp = paddle.trunc(paddle.divide(aid, n_atoms)) - aid = aid % n_atoms - tmp_coord = coord[aid] - coord_shift[tmp] - tmp_atype = atype[aid] - - # merge local and ghost atoms - merged_coord = paddle.concat([coord, tmp_coord]) - merged_coord_shift = paddle.concat([paddle.zeros_like(coord), coord_shift[tmp]]) - 
merged_atype = paddle.concat([atype, tmp_atype]) - merged_mapping = paddle.concat([paddle.arange(atype.numel()), aid]) # pylint: disable=no-explicit-dtype,no-explicit-device - return merged_coord_shift, merged_atype, merged_mapping - - -def build_neighbor_list( - nloc: int, coord, atype, rcut: float, sec, mapping, type_split=True, min_check=False -): - """For each atom inside region, build its neighbor list. - - Args: - - coord: shape is [nall*3] - - atype: shape is [nall] - """ - nall = coord.numel() // 3 - coord = coord.astype(paddle.get_default_dtype()) - nlist = [[] for _ in range(nloc)] - coord_l = coord.reshape([-1, 1, 3])[:nloc] - coord_r = coord.reshape([1, -1, 3]) - distance = coord_l - coord_r - # distance = paddle.linalg.norm(distance, axis=-1) - distance = decomp.norm(distance, axis=-1) - DISTANCE_INF = distance.max().detach() + rcut - distance[:nloc, :nloc] += paddle.eye(nloc, dtype=paddle.bool) * DISTANCE_INF # pylint: disable=no-explicit-device - if min_check: - if distance.min().abs() < 1e-6: - raise RuntimeError("Atom dist too close!") - if not type_split: - sec = sec[-1:] - lst = [] - nlist = paddle.zeros((nloc, sec[-1].item())).long() - 1 # pylint: disable=no-explicit-dtype,no-explicit-device - nlist_loc = paddle.zeros((nloc, sec[-1].item())).long() - 1 # pylint: disable=no-explicit-dtype,no-explicit-device - nlist_type = paddle.zeros((nloc, sec[-1].item())).long() - 1 # pylint: disable=no-explicit-dtype,no-explicit-device - for i, nnei in enumerate(sec): - if i > 0: - nnei = nnei - sec[i - 1] - if not type_split: - tmp = distance - else: - mask = atype.unsqueeze(0) == i - tmp = distance + (~mask) * DISTANCE_INF - if tmp.shape[1] >= nnei: - _sorted, indices = paddle.topk(tmp, nnei, axis=1, largest=False) - else: - # when nnei > nall - indices = paddle.zeros((nloc, nnei)).long() - 1 # pylint: disable=no-explicit-dtype,no-explicit-device - _sorted = paddle.ones((nloc, nnei)).long() * DISTANCE_INF # pylint: 
disable=no-explicit-dtype,no-explicit-device - _sorted_nnei, indices_nnei = paddle.topk( - tmp, tmp.shape[1], axis=1, largest=False - ) - _sorted[:, : tmp.shape[1]] = _sorted_nnei - indices[:, : tmp.shape[1]] = indices_nnei - mask = (_sorted < rcut).to(paddle.int64) - indices_loc = mapping[indices] - indices = indices * mask + -1 * (1 - mask) # -1 for padding - indices_loc = indices_loc * mask + -1 * (1 - mask) # -1 for padding - if i == 0: - start = 0 - else: - start = sec[i - 1] - end = min(sec[i], start + indices.shape[1]) - nlist[:, start:end] = indices[:, :nnei] - nlist_loc[:, start:end] = indices_loc[:, :nnei] - nlist_type[:, start:end] = atype[indices[:, :nnei]] * mask + -1 * (1 - mask) - return nlist, nlist_loc, nlist_type - - def compute_smooth_weight(distance, rmin: float, rmax: float): """Compute smooth weight for descriptor elements.""" if rmin >= rmax: @@ -245,70 +16,3 @@ def compute_smooth_weight(distance, rmin: float, rmax: float): uu = (distance - rmin) / (rmax - rmin) vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1 return vv * mid_mask.astype(vv.dtype) + min_mask.astype(vv.dtype) - - -def make_env_mat( - coord, - atype, - region, - rcut: Union[float, list], - sec, - pbc=True, - type_split=True, - min_check=False, -): - """Based on atom coordinates, return environment matrix. 
- - Returns - ------- - nlist: nlist, [nloc, nnei] - merged_coord_shift: shift on nall atoms, [nall, 3] - merged_mapping: mapping from nall index to nloc index, [nall] - """ - # move outer atoms into cell - hybrid = isinstance(rcut, list) - _rcut = rcut - if hybrid: - _rcut = max(rcut) - if pbc: - merged_coord_shift, merged_atype, merged_mapping = append_neighbors( - coord, region, atype, _rcut - ) - merged_coord = coord[merged_mapping] - merged_coord_shift - if merged_coord.shape[0] <= coord.shape[0]: - log.warning("No ghost atom is added for system ") - else: - merged_coord_shift = paddle.zeros_like(coord) - merged_atype = atype.clone() - merged_mapping = paddle.arange(atype.numel()) # pylint: disable=no-explicit-dtype,no-explicit-device - merged_coord = coord.clone() - - # build nlist - if not hybrid: - nlist, nlist_loc, nlist_type = build_neighbor_list( - coord.shape[0], - merged_coord, - merged_atype, - rcut, - sec, - merged_mapping, - type_split=type_split, - min_check=min_check, - ) - else: - nlist, nlist_loc, nlist_type = [], [], [] - for ii, single_rcut in enumerate(rcut): - nlist_tmp, nlist_loc_tmp, nlist_type_tmp = build_neighbor_list( - coord.shape[0], - merged_coord, - merged_atype, - single_rcut, - sec[ii], - merged_mapping, - type_split=type_split, - min_check=min_check, - ) - nlist.append(nlist_tmp) - nlist_loc.append(nlist_loc_tmp) - nlist_type.append(nlist_type_tmp) - return nlist, nlist_loc, nlist_type, merged_coord_shift, merged_mapping diff --git a/deepmd/pd/utils/region.py b/deepmd/pd/utils/region.py index 160a4d124e..21927e3619 100644 --- a/deepmd/pd/utils/region.py +++ b/deepmd/pd/utils/region.py @@ -79,20 +79,6 @@ def to_face_distance( return dist.reshape(list(cshape[:-2]) + [3]) # noqa:RUF005 -def _to_face_distance(cell): - volume = paddle.linalg.det(cell) - c_yz = paddle.cross(cell[1], cell[2]) - # _h2yz = volume / paddle.linalg.norm(c_yz) - _h2yz = volume / decomp.norm(c_yz) - c_zx = paddle.cross(cell[2], cell[0]) - # _h2zx = volume / 
paddle.linalg.norm(c_zx) - _h2zx = volume / decomp.norm(c_zx) - c_xy = paddle.cross(cell[0], cell[1]) - # _h2xy = volume / paddle.linalg.norm(c_xy) - _h2xy = volume / decomp.norm(c_xy) - return paddle.stack([_h2yz, _h2zx, _h2xy]) - - def b_to_face_distance(cell): volume = paddle.linalg.det(cell) c_yz = paddle.cross(cell[:, 1], cell[:, 2], axis=-1) @@ -120,7 +106,7 @@ def normalize_coord( Parameters ---------- coord : paddle.Tensor - orignal coordinates of shape [*, na, 3]. + original coordinates of shape [*, na, 3]. Returns ------- @@ -129,5 +115,5 @@ def normalize_coord( """ icoord = phys2inter(coord, cell) - icoord = paddle.remainder(icoord, paddle.to_tensor(1.0)) + icoord = paddle.remainder(icoord, paddle.full([], 1.0)) return inter2phys(icoord, cell) diff --git a/deepmd/pd/utils/serialization.py b/deepmd/pd/utils/serialization.py index 7beb459c0c..0274608424 100644 --- a/deepmd/pd/utils/serialization.py +++ b/deepmd/pd/utils/serialization.py @@ -1,4 +1,11 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import json + +import paddle + +from deepmd.pd.model.model.model import ( + BaseModel, +) def serialize_from_file(model_file: str) -> dict: @@ -27,4 +34,22 @@ def deserialize_to_file(model_file: str, data: dict) -> None: data : dict The dictionary to be deserialized. """ - raise NotImplementedError("Paddle do not support jit.export yet.") + if not model_file.endswith(".json"): + raise ValueError("Paddle backend only supports converting .json file") + model = BaseModel.deserialize(data["model"]) + # JIT will happy in this way... 
+ model.model_def_script = json.dumps(data["model_def_script"]) + if "min_nbor_dist" in data.get("@variables", {}): + model.min_nbor_dist = float(data["@variables"]["min_nbor_dist"]) + # model = paddle.jit.to_static(model) + paddle.set_flags( + { + "FLAGS_save_cf_stack_op": 1, + "FLAGS_prim_enable_dynamic": 1, + "FLAGS_enable_pir_api": 1, + } + ) + paddle.jit.save( + model, + model_file.split(".json")[0], + ) diff --git a/deepmd/pd/utils/stat.py b/deepmd/pd/utils/stat.py index 3ecd695038..a8bdbd6415 100644 --- a/deepmd/pd/utils/stat.py +++ b/deepmd/pd/utils/stat.py @@ -185,8 +185,8 @@ def model_forward_auto_batch_size(*args, **kwargs): def _make_preset_out_bias( ntypes: int, - ibias: list[Optional[np.array]], -) -> Optional[np.array]: + ibias: list[Optional[np.ndarray]], +) -> Optional[np.ndarray]: """Make preset out bias. output: @@ -249,11 +249,11 @@ def compute_output_stats( Parameters ---------- - merged : Union[Callable[[], List[dict]], List[dict]] - - List[dict]: A list of data samples from various data systems. + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` originating from the `i`-th data system. - - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format only when needed. Since the sampling process can be slow and memory-intensive, the lazy function helps by only sampling once. ntypes : int @@ -262,11 +262,11 @@ def compute_output_stats( The path to the stat file. rcond : float, optional The condition number for the regression of atomic energy. - preset_bias : Dict[str, List[Optional[paddle.Tensor]]], optional + preset_bias : dict[str, list[Optional[paddle.Tensor]]], optional Specifying atomic energy contribution in vacuum. Given by key:value pairs. 
- The value is a list specifying the bias. the elements can be None or np.array of output shape. + The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. model_forward : Callable[..., paddle.Tensor], optional The wrapped forward function of atomic model. If not None, the model will be utilized to generate the original energy prediction, diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index a4d3ccbfea..7debc72a2e 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -488,15 +488,15 @@ def test_jax_self_consistent(self): def test_pd_consistent_with_ref(self): """Test whether PD and reference are consistent.""" - if self.skip_pt: + if self.skip_pd: self.skipTest("Unsupported backend") ref_backend = self.get_reference_backend() if ref_backend == self.RefBackend.PD: self.skipTest("Reference is self") ret1, data1 = self.get_reference_ret_serialization(ref_backend) ret1 = self.extract_ret(ret1, ref_backend) - obj = self.pt_class.deserialize(data1) - ret2 = self.eval_pt(obj) + obj = self.pd_class.deserialize(data1) + ret2 = self.eval_pd(obj) ret2 = self.extract_ret(ret2, self.RefBackend.PD) data2 = obj.serialize() if obj.__class__.__name__.startswith(("Polar", "Dipole", "DOS")): @@ -504,15 +504,26 @@ def test_pd_consistent_with_ref(self): common_keys = set(data1.keys()) & set(data2.keys()) data1 = {k: data1[k] for k in common_keys} data2 = {k: data2[k] for k in common_keys} + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" def test_pd_self_consistent(self): - """Test whether PT is self consistent.""" + """Test whether 
PD is self consistent.""" if self.skip_pd: self.skipTest("Unsupported backend") obj1 = self.init_backend_cls(self.pd_class) ret1, data1 = self.get_pd_ret_serialization_from_cls(obj1) obj2 = self.pd_class.deserialize(data1) ret2, data2 = self.get_pd_ret_serialization_from_cls(obj2) + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + else: + self.assertEqual(rr1, rr2) @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_array_api_strict_consistent_with_ref(self): diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py index cb6fb2cf99..843f94e6be 100644 --- a/source/tests/consistent/model/test_ener.py +++ b/source/tests/consistent/model/test_ener.py @@ -114,6 +114,7 @@ def data(self) -> dict: tf_class = EnergyModelTF dp_class = EnergyModelDP pt_class = EnergyModelPT + pd_class = EnergyModelPD jax_class = EnergyModelJAX pd_class = EnergyModelPD args = model_args() @@ -129,6 +130,8 @@ def get_reference_backend(self): return self.RefBackend.TF if not self.skip_jax: return self.RefBackend.JAX + if not self.skip_pd: + return self.RefBackend.PD if not self.skip_dp: return self.RefBackend.DP raise ValueError("No available reference") @@ -233,9 +236,9 @@ def eval_jax(self, jax_obj: Any) -> Any: self.box, ) - def eval_pd(self, pt_obj: Any) -> Any: + def eval_pd(self, pd_obj: Any) -> Any: return self.eval_pd_model( - pt_obj, + pd_obj, self.natoms, self.coords, self.atype, diff --git a/source/tests/consistent/test_neighbor_stat.py b/source/tests/consistent/test_neighbor_stat.py index 55181a6903..e0c177f707 100644 --- a/source/tests/consistent/test_neighbor_stat.py +++ b/source/tests/consistent/test_neighbor_stat.py @@ -14,6 +14,7 @@ ) from .common import ( INSTALLED_JAX, + INSTALLED_PD, 
INSTALLED_PT, INSTALLED_TF, ) @@ -87,3 +88,7 @@ def test_neighbor_stat_dp(self): @unittest.skipUnless(INSTALLED_JAX, "jax is not installed") def test_neighbor_stat_jax(self): self.run_neighbor_stat("jax") + + @unittest.skipUnless(INSTALLED_PD, "paddle is not installed") + def test_neighbor_stat_pd(self): + self.run_neighbor_stat("paddle") diff --git a/source/tests/pd/model/test_descriptor.py b/source/tests/pd/model/test_descriptor.py index 0239b0a03f..10f2fd271b 100644 --- a/source/tests/pd/model/test_descriptor.py +++ b/source/tests/pd/model/test_descriptor.py @@ -142,16 +142,16 @@ def test_consistency(self): stddev=std_ones.detach().cpu(), ) - pt_coord = self.pd_batch["coord"].to(env.DEVICE) + pd_coord = self.pd_batch["coord"].to(env.DEVICE) atype = self.pd_batch["atype"].to(env.DEVICE) - pt_coord.stop_gradient = False + pd_coord.stop_gradient = False ( extended_coord, extended_atype, mapping, nlist, ) = extend_input_and_build_neighbor_list( - pt_coord, + pd_coord, self.pd_batch["atype"].to(env.DEVICE), self.rcut, self.sel, @@ -168,8 +168,8 @@ def test_consistency(self): self.rcut_smth, ) my_d.sum().backward() - bsz = pt_coord.shape[0] - my_force = pt_coord.grad.reshape([bsz, -1, 3]).cpu().detach().numpy() + bsz = pd_coord.shape[0] + my_force = pd_coord.grad.reshape([bsz, -1, 3]).cpu().detach().numpy() base_force = base_force.reshape(bsz, -1, 3) base_d = base_d.reshape(bsz, -1, self.nnei, 4) my_d = my_d.reshape([bsz, -1, self.nnei, 4]).cpu().detach().numpy() diff --git a/source/tests/pd/model/test_embedding_net.py b/source/tests/pd/model/test_embedding_net.py index 12c42049e8..2dcc9f821b 100644 --- a/source/tests/pd/model/test_embedding_net.py +++ b/source/tests/pd/model/test_embedding_net.py @@ -54,7 +54,7 @@ def get_single_batch(dataset, index=None): if index is None: index = dp_random.choice(np.arange(len(dataset))) np_batch = dataset[index] - pt_batch = {} + pd_batch = {} for key in [ "coord", @@ -68,11 +68,11 @@ def get_single_batch(dataset, index=None): ]: 
if key in np_batch.keys(): np_batch[key] = np.expand_dims(np_batch[key], axis=0) - pt_batch[key] = paddle.to_tensor(np_batch[key]).to(device=env.DEVICE) + pd_batch[key] = paddle.to_tensor(np_batch[key]).to(device=env.DEVICE) if key in ["coord", "force", "force_mag"]: np_batch[key] = np_batch[key].reshape(1, -1) np_batch["natoms"] = np_batch["natoms"][0] - return np_batch, pt_batch + return np_batch, pd_batch def base_se_a(descriptor, coord, atype, natoms, box): @@ -181,8 +181,8 @@ def test_consistency(self): # Keep parameter value consistency between 2 implentations paddle.assign(var, param) - pt_coord = self.paddle_batch["coord"].to(env.DEVICE) - pt_coord.stop_gradient = False + pd_coord = self.paddle_batch["coord"].to(env.DEVICE) + pd_coord.stop_gradient = False ( extended_coord, @@ -190,7 +190,7 @@ def test_consistency(self): mapping, nlist, ) = extend_input_and_build_neighbor_list( - pt_coord, + pd_coord, self.paddle_batch["atype"].to(env.DEVICE), self.rcut, self.sel, @@ -205,7 +205,7 @@ def test_consistency(self): my_embedding = descriptor_out.cpu().detach().numpy() fake_energy = paddle.sum(descriptor_out) fake_energy.backward() - my_force = -pt_coord.grad.cpu().numpy() + my_force = -pd_coord.grad.cpu().numpy() # Check np.testing.assert_allclose(dp_embedding, my_embedding) diff --git a/source/tests/pd/model/test_jit.py b/source/tests/pd/model/test_jit.py index 772a05530f..28ab499bf1 100644 --- a/source/tests/pd/model/test_jit.py +++ b/source/tests/pd/model/test_jit.py @@ -53,7 +53,7 @@ def test_jit(self): def tearDown(self): for f in os.listdir("."): - if f.startswith("model") and f.endswith("pt"): + if f.startswith("model") and f.endswith("pd"): os.remove(f) if f in ["lcurve.out", "frozen_model.json", "frozen_model.pdiparams"]: os.remove(f) diff --git a/source/tests/pd/test_lr.py b/source/tests/pd/test_lr.py index f5ce911b04..35746b6713 100644 --- a/source/tests/pd/test_lr.py +++ b/source/tests/pd/test_lr.py @@ -27,7 +27,7 @@ def test_consistency(self): 
self.decay_step = decay_step self.stop_step = stop_step self.judge_it() - self.decay_rate_pt() + self.decay_rate_pd() def judge_it(self): base_lr = learning_rate.LearningRateExp( @@ -55,7 +55,7 @@ def judge_it(self): self.assertTrue(np.allclose(base_vals, my_vals)) tf.reset_default_graph() - def decay_rate_pt(self): + def decay_rate_pd(self): my_lr = LearningRateExp( self.start_lr, self.stop_lr, self.decay_step, self.stop_step ) From 1c0161c45dae0f3fa7b7e562fbee99bcbb696243 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 12:39:15 +0800 Subject: [PATCH 32/58] fix pth -> json --- deepmd/pd/entrypoints/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index fef160c0b0..a174d5f045 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -343,7 +343,7 @@ def train( def freeze( model: str, - output: str = "frozen_model.pth", + output: str = "frozen_model.json", head: Optional[str] = None, ): paddle.set_flags( From 3354e5c94ef9f249daaae4e014baba5634a26ab9 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 13:09:13 +0800 Subject: [PATCH 33/58] update unitest and training --- deepmd/pd/train/training.py | 5 +--- source/tests/pd/model/test_region.py | 40 ++-------------------------- 2 files changed, 3 insertions(+), 42 deletions(-) diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 08dfa79e81..916376e552 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -978,11 +978,8 @@ def save_model(self, save_path, lr=0.0, step=0): ) module.train_infos["lr"] = float(lr) module.train_infos["step"] = step - optim_state_dict = deepcopy(self.optimizer.state_dict()) - for item in optim_state_dict["param_groups"]: - item["lr"] = float(item["lr"]) paddle.save( - {"model": module.state_dict(), "optimizer": optim_state_dict}, + {"model": module.state_dict(), 
"optimizer": self.optimizer.state_dict()}, str(save_path), ) checkpoint_dir = save_path.parent diff --git a/source/tests/pd/model/test_region.py b/source/tests/pd/model/test_region.py index 7878e73cab..93fa82d8a5 100644 --- a/source/tests/pd/model/test_region.py +++ b/source/tests/pd/model/test_region.py @@ -4,12 +4,6 @@ import numpy as np import paddle -from deepmd.pd.utils import ( - env, -) -from deepmd.pd.utils.preprocess import ( - Region3D, -) from deepmd.pd.utils.region import ( inter2phys, to_face_distance, @@ -29,7 +23,7 @@ def setUp(self): ) self.cell = self.cell.unsqueeze(0).unsqueeze(0) self.cell = paddle.tile(self.cell, [4, 5, 1, 1]) - self.prec = 1e-8 + self.prec = 9e-8 def test_inter_to_phys(self): generator = paddle.seed(GLOBAL_SEED) @@ -56,8 +50,8 @@ def test_to_face_dist(self): dz = vol / sxy dy = vol / sxz dx = vol / syz + expected = paddle.to_tensor([dx, dy, dz], place="cpu") dists = to_face_distance(self.cell) - expected = paddle.to_tensor([dx, dy, dz], dtype=dists.dtype).to(device="cpu") for ii in range(4): for jj in range(5): np.testing.assert_allclose( @@ -66,33 +60,3 @@ def test_to_face_dist(self): rtol=self.prec, atol=self.prec, ) - - -class TestLegacyRegion(unittest.TestCase): - def setUp(self): - self.cell = paddle.to_tensor( - [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype, place=env.DEVICE - ) - self.prec = 1e-6 - - def test_inter_to_phys(self): - generator = paddle.seed(GLOBAL_SEED) - inter = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) - reg = Region3D(self.cell) - phys = reg.inter2phys(inter) - expected_phys = paddle.matmul(inter, self.cell) - np.testing.assert_allclose( - phys.numpy(), expected_phys.numpy(), rtol=self.prec, atol=self.prec - ) - - def test_inter_to_inter(self): - generator = paddle.seed(GLOBAL_SEED) - inter = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) - reg = Region3D(self.cell) - new_inter = reg.phys2inter(reg.inter2phys(inter)) - np.testing.assert_allclose( - inter.numpy(), 
new_inter.numpy(), rtol=self.prec, atol=self.prec - ) - - def test_to_face_dist(self): - pass From 0d3f8cfdb76a1befb0619bdd779a76cae2a56704 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 13:14:07 +0800 Subject: [PATCH 34/58] install paddle when test_cuda --- .github/workflows/test_cuda.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 996a1bcff0..266e6ec653 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -52,6 +52,7 @@ jobs: export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') source/install/uv_with_retry.sh pip install --system -v -e .[gpu,test,lmp,cu12,torch,jax] mpi4py + source/install/uv_with_retry.sh pip install --system --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu123/ env: DP_VARIANT: cuda DP_ENABLE_NATIVE_OPTIMIZATION: 1 From 859b94d3965016d507ef5ed64b5c7a1645d53f4f Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 15:59:40 +0800 Subject: [PATCH 35/58] fix unitest --- .pre-commit-config.yaml | 52 ++++++++++++------------- deepmd/pd/model/descriptor/se_a.py | 23 ++++++----- deepmd/pd/model/task/invar_fitting.py | 2 +- source/tests/consistent/model/common.py | 1 + 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 721a0cd6eb..9e7e3b763e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 - hooks: - - id: prettier - types_or: [markdown, yaml, css] - # workflow files cannot be 
modified by pre-commit.ci - exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + # - repo: https://github.com/pre-commit/mirrors-prettier + # rev: v4.0.0-alpha.8 + # hooks: + # - id: prettier + # types_or: [markdown, yaml, css] + # # workflow files cannot be modified by pre-commit.ci + # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - - repo: https://github.com/njzjz/mirrors-bibtex-tidy - rev: v1.13.0 - hooks: - - id: bibtex-tidy - args: - - --curly - - --numeric - - --align=13 - - --blank-lines - # disable sort: the order of keys and fields has explict meanings - #- --sort=key - - --duplicates=key,doi,citation,abstract - - --merge=combine - #- --sort-fields - #- --strip-comments - - --trailing-commas - - --encode-urls - - --remove-empty-fields - - --wrap=80 + # - repo: https://github.com/njzjz/mirrors-bibtex-tidy + # rev: v1.13.0 + # hooks: + # - id: bibtex-tidy + # args: + # - --curly + # - --numeric + # - --align=13 + # - --blank-lines + # # disable sort: the order of keys and fields has explict meanings + # #- --sort=key + # - --duplicates=key,doi,citation,abstract + # - --merge=combine + # #- --sort-fields + # #- --strip-comments + # - --trailing-commas + # - --encode-urls + # - --remove-empty-fields + # - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 216d32783e..180d6f0a3f 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -672,23 +672,22 @@ def forward( rr = dmatrix[ti_mask, self.sec[ii] : self.sec[ii + 1], :] else: rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] - rr = rr * mm[:, :, None].astype(rr.dtype) - ss = rr[:, :, :1] - if self.compress: raise NotImplementedError( "Compressed environment is not implemented 
yet." ) else: - # nfnl x nt x ng - gg = ll.forward(ss) - # nfnl x 4 x ng - gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) - - if ti_mask is not None: - xyz_scatter[ti_mask] += gr - else: - xyz_scatter += gr + if rr.numel() > 0: + rr = rr * mm.unsqueeze(2).astype(rr.dtype) + ss = rr[:, :, :1] + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) + if ti_mask is not None: + xyz_scatter[ti_mask] += gr + else: + xyz_scatter += gr xyz_scatter /= self.nnei xyz_scatter_1 = xyz_scatter.transpose([0, 2, 1]) diff --git a/deepmd/pd/model/task/invar_fitting.py b/deepmd/pd/model/task/invar_fitting.py index 46b776a022..b366fc1d2e 100644 --- a/deepmd/pd/model/task/invar_fitting.py +++ b/deepmd/pd/model/task/invar_fitting.py @@ -50,7 +50,7 @@ class InvarFitting(GeneralFitting): neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : paddle.Tensor, optional - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. 
numb_fparam : int diff --git a/source/tests/consistent/model/common.py b/source/tests/consistent/model/common.py index 7d2b984257..43fd021dd7 100644 --- a/source/tests/consistent/model/common.py +++ b/source/tests/consistent/model/common.py @@ -105,5 +105,6 @@ def eval_pd_model(self, pd_obj: Any, natoms, coords, atype, box) -> Any: numpy_to_paddle(coords), numpy_to_paddle(atype), box=numpy_to_paddle(box), + do_atomic_virial=True, ).items() } From 74ee1c2477f5d8cfdfd6fe6011f8818edafeabb5 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 16:49:27 +0800 Subject: [PATCH 36/58] add eta in logging message for convenient --- .pre-commit-config.yaml | 52 ++++++++++++++++++------------------- deepmd/loggers/training.py | 7 ++++- deepmd/pd/train/training.py | 4 +++ 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e7e3b763e..721a0cd6eb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - # - repo: https://github.com/pre-commit/mirrors-prettier - # rev: v4.0.0-alpha.8 - # hooks: - # - id: prettier - # types_or: [markdown, yaml, css] - # # workflow files cannot be modified by pre-commit.ci - # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v4.0.0-alpha.8 + hooks: + - id: prettier + types_or: [markdown, yaml, css] + # workflow files cannot be modified by pre-commit.ci + exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - # - repo: https://github.com/njzjz/mirrors-bibtex-tidy - # rev: v1.13.0 - # hooks: - # - id: bibtex-tidy - # args: - # - --curly - # - --numeric - # - 
--align=13 - # - --blank-lines - # # disable sort: the order of keys and fields has explict meanings - # #- --sort=key - # - --duplicates=key,doi,citation,abstract - # - --merge=combine - # #- --sort-fields - # #- --strip-comments - # - --trailing-commas - # - --encode-urls - # - --remove-empty-fields - # - --wrap=80 + - repo: https://github.com/njzjz/mirrors-bibtex-tidy + rev: v1.13.0 + hooks: + - id: bibtex-tidy + args: + - --curly + - --numeric + - --align=13 + - --blank-lines + # disable sort: the order of keys and fields has explict meanings + #- --sort=key + - --duplicates=key,doi,citation,abstract + - --merge=combine + #- --sort-fields + #- --strip-comments + - --trailing-commas + - --encode-urls + - --remove-empty-fields + - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/deepmd/loggers/training.py b/deepmd/loggers/training.py index b2fff4788b..438ca5adc2 100644 --- a/deepmd/loggers/training.py +++ b/deepmd/loggers/training.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import datetime from typing import ( Optional, ) @@ -7,9 +8,13 @@ def format_training_message( batch: int, wall_time: float, + eta: Optional[int] = None, ): """Format a training message.""" - return f"batch {batch:7d}: " f"total wall time = {wall_time:.2f} s" + msg = f"batch {batch:7d}: " f"total wall time = {wall_time:.2f} s" + if isinstance(eta, int): + msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}" + return msg def format_training_message_per_task( diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 916376e552..c22f60b847 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -825,10 +825,14 @@ def log_loss_valid(_task_key="Default"): train_time = current_time - self.t0 self.t0 = current_time if self.rank == 0 and self.timing_in_training: + eta = int( + (self.num_steps - _step_id - 1) / self.disp_freq * train_time + ) log.info( format_training_message( 
batch=display_step_id, wall_time=train_time, + eta=eta, ) ) # the first training time is not accurate From f176309490ecb77481cf5373d5677915ce29a644 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 9 Nov 2024 18:05:41 +0800 Subject: [PATCH 37/58] remove hybrid code and enable one unitest --- deepmd/pd/train/wrapper.py | 14 ++++---------- source/tests/pd/test_finetune.py | 1 - 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/deepmd/pd/train/wrapper.py b/deepmd/pd/train/wrapper.py index da8c284dd7..c3643f8372 100644 --- a/deepmd/pd/train/wrapper.py +++ b/deepmd/pd/train/wrapper.py @@ -81,11 +81,8 @@ def share_params(self, shared_links, resume=False): if class_type_base == "descriptor": base_class = self.model[model_key_base].get_descriptor() elif "hybrid" in class_type_base: - hybrid_index = int(class_type_base.split("_")[-1]) - base_class = ( - self.model[model_key_base] - .get_descriptor() - .descriptor_list[hybrid_index] + raise NotImplementedError( + "Hybrid descriptor is not implemented yet" ) else: raise RuntimeError(f"Unknown class_type {class_type_base}!") @@ -102,11 +99,8 @@ def share_params(self, shared_links, resume=False): if class_type_link == "descriptor": link_class = self.model[model_key_link].get_descriptor() elif "hybrid" in class_type_link: - hybrid_index = int(class_type_link.split("_")[-1]) - link_class = ( - self.model[model_key_link] - .get_descriptor() - .descriptor_list[hybrid_index] + raise NotImplementedError( + "Hybrid descriptor is not implemented yet" ) else: raise RuntimeError(f"Unknown class_type {class_type_link}!") diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py index 09caa597bf..2c6cca83aa 100644 --- a/source/tests/pd/test_finetune.py +++ b/source/tests/pd/test_finetune.py @@ -294,7 +294,6 @@ def tearDown(self): shutil.rmtree(f) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelSeA(FinetuneTest, unittest.TestCase): def setUp(self): 
input_json = str(Path(__file__).parent / "water/se_atten.json") From fac51d33a9239dfc0412503813e5ad72f8c7d19b Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 11 Nov 2024 11:18:14 +0800 Subject: [PATCH 38/58] add pd/__init__.py --- deepmd/pd/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 deepmd/pd/__init__.py diff --git a/deepmd/pd/__init__.py b/deepmd/pd/__init__.py new file mode 100644 index 0000000000..2f4814885c --- /dev/null +++ b/deepmd/pd/__init__.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +# import customized OPs globally +from deepmd.pd.cxx_op import ( + ENABLE_CUSTOMIZED_OP, +) +from deepmd.utils.entry_point import ( + load_entry_point, +) + +load_entry_point("deepmd.pd") + +__all__ = [ + "ENABLE_CUSTOMIZED_OP", +] From db1cd76d3d66f426a9533dfeb96211c3d0878b91 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 11 Nov 2024 11:24:20 +0800 Subject: [PATCH 39/58] fix enable_prim --- deepmd/pd/utils/env.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 3faf6f22e0..4c104db374 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -78,15 +78,14 @@ def enable_prim(enable: bool = True): """Enable running program in primitive C++ API in eager/static mode.""" - if enable: - from paddle.framework import ( - core, - ) + from paddle.framework import ( + core, + ) - core.set_prim_eager_enabled(True) - core._set_prim_all_enabled(True) - log = logging.getLogger(__name__) - log.info("Enable prim in eager and static mode.") + core.set_prim_eager_enabled(enable) + core._set_prim_all_enabled(enable) + log = logging.getLogger(__name__) + log.info(f"{'Enable' if enable else 'Disable'} prim in eager and static mode.") __all__ = [ From 36512fd20b63a690b573baf1ccfa7f89496ed69b Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 11 Nov 2024 
12:40:25 +0800 Subject: [PATCH 40/58] remove unused layernorm --- deepmd/pd/model/network/layernorm.py | 167 --------------------------- 1 file changed, 167 deletions(-) delete mode 100644 deepmd/pd/model/network/layernorm.py diff --git a/deepmd/pd/model/network/layernorm.py b/deepmd/pd/model/network/layernorm.py deleted file mode 100644 index fdf2433ed2..0000000000 --- a/deepmd/pd/model/network/layernorm.py +++ /dev/null @@ -1,167 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from typing import ( - Optional, - Union, -) - -import numpy as np -import paddle -import paddle.nn as nn - -from deepmd.dpmodel.utils.network import LayerNorm as DPLayerNorm -from deepmd.pd.model.network.init import ( - normal_, - ones_, - zeros_, -) -from deepmd.pd.utils import ( - env, -) -from deepmd.pd.utils.env import ( - DEFAULT_PRECISION, - PRECISION_DICT, -) -from deepmd.pd.utils.utils import ( - get_generator, - to_numpy_array, - to_paddle_tensor, -) - -device = env.DEVICE - - -def empty_t(shape, precision): - return paddle.empty(shape, dtype=precision).to(device=device) - - -class LayerNorm(nn.Layer): - def __init__( - self, - num_in, - eps: float = 1e-5, - uni_init: bool = True, - bavg: float = 0.0, - stddev: float = 1.0, - precision: str = DEFAULT_PRECISION, - trainable: bool = True, - seed: Optional[Union[int, list[int]]] = None, - ): - super().__init__() - self.eps = eps - self.uni_init = uni_init - self.num_in = num_in - self.precision = precision - self.prec = PRECISION_DICT[self.precision] - self.matrix = self.create_parameter( - shape=[num_in], - dtype=self.prec, - default_initializer=nn.initializer.Assign( - empty_t([num_in], self.prec), - ), - ) - self.bias = self.create_parameter( - shape=[num_in], - dtype=self.prec, - default_initializer=nn.initializer.Assign(empty_t([num_in], self.prec)), - ) - random_generator = get_generator(seed) - if self.uni_init: - ones_(self.matrix.data) - zeros_(self.bias.data) - else: - normal_(self.bias.data, mean=bavg, std=stddev, 
generator=random_generator) - normal_( - self.matrix.data, - std=stddev / np.sqrt(self.num_in), - generator=random_generator, - ) - self.trainable = trainable - if not self.trainable: - self.matrix.stop_gradient = True - self.bias.stop_gradient = True - - def dim_out(self) -> int: - return self.matrix.shape[0] - - def forward( - self, - xx: paddle.Tensor, - ) -> paddle.Tensor: - """One Layer Norm used by DP model. - - Parameters - ---------- - xx : paddle.Tensor - The input of index. - - Returns - ------- - yy: paddle.Tensor - The output. - """ - # mean = xx.mean(dim=-1, keepdim=True) - # variance = xx.var(dim=-1, unbiased=False, keepdim=True) - # The following operation is the same as above, but will not raise error when using jit model to inference. - # See https://github.com/pytorch/pytorch/issues/85792 - if xx.numel() > 0: - variance, mean = ( - paddle.var(xx, axis=-1, unbiased=False, keepdim=True), - paddle.mean(xx, axis=-1, keepdim=True), - ) - yy = (xx - mean) / paddle.sqrt(variance + self.eps) - else: - yy = xx - if self.matrix is not None and self.bias is not None: - yy = yy * self.matrix + self.bias - return yy - - def serialize(self) -> dict: - """Serialize the layer to a dict. - - Returns - ------- - dict - The serialized layer. - """ - nl = DPLayerNorm( - self.matrix.shape[0], - eps=self.eps, - trainable=self.trainable, - precision=self.precision, - ) - nl.w = to_numpy_array(self.matrix) - nl.b = to_numpy_array(self.bias) - data = nl.serialize() - return data - - @classmethod - def deserialize(cls, data: dict) -> "LayerNorm": - """Deserialize the layer from a dict. - - Parameters - ---------- - data : dict - The dict to deserialize from. 
- """ - nl = DPLayerNorm.deserialize(data) - obj = cls( - nl["matrix"].shape[0], - eps=nl["eps"], - trainable=nl["trainable"], - precision=nl["precision"], - ) - prec = PRECISION_DICT[obj.precision] - - def check_load_param(ss): - if nl[ss] is not None: - tensor = to_paddle_tensor(nl[ss]) - return paddle.create_parameter( - tensor.shape, - dtype=tensor.dtype, - default_initializer=nn.initializer.Assign(tensor), - ) - return None - - obj.matrix = check_load_param("matrix") - obj.bias = check_load_param("bias") - return obj From 351bf7a03940973b2c4644293841260e9303b264 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 25 Nov 2024 13:34:50 +0800 Subject: [PATCH 41/58] update dpa1 code --- deepmd/pd/model/descriptor/__init__.py | 6 + deepmd/pd/model/descriptor/dpa1.py | 674 ++++++++++++++++ deepmd/pd/model/descriptor/se_atten.py | 1019 ++++++++++++++++++++++++ deepmd/pd/model/network/layernorm.py | 165 ++++ deepmd/pd/train/training.py | 6 +- deepmd/pd/utils/decomp.py | 49 +- deepmd/pd/utils/env.py | 64 +- 7 files changed, 1942 insertions(+), 41 deletions(-) create mode 100644 deepmd/pd/model/descriptor/dpa1.py create mode 100644 deepmd/pd/model/descriptor/se_atten.py create mode 100644 deepmd/pd/model/network/layernorm.py diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 654643959b..7eaa0df85b 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -5,6 +5,10 @@ from .descriptor import ( DescriptorBlock, ) +from .dpa1 import ( + DescrptBlockSeAtten, + DescrptDPA1, +) from .env_mat import ( prod_env_mat, ) @@ -17,6 +21,8 @@ "BaseDescriptor", "DescriptorBlock", "DescrptBlockSeA", + "DescrptBlockSeAtten", + "DescrptDPA1", "DescrptSeA", "prod_env_mat", ] diff --git a/deepmd/pd/model/descriptor/dpa1.py b/deepmd/pd/model/descriptor/dpa1.py new file mode 100644 index 0000000000..0d8b9dc9b1 --- /dev/null +++ b/deepmd/pd/model/descriptor/dpa1.py @@ -0,0 +1,674 
@@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.network.mlp import ( + NetworkCollection, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, + TypeEmbedNetConsistent, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + extend_descrpt_stat, +) +from .se_atten import ( + DescrptBlockSeAtten, + NeighborGatedAttention, +) + + +@BaseDescriptor.register("dpa1") +@BaseDescriptor.register("se_atten") +class DescrptDPA1(BaseDescriptor, paddle.nn.Layer): + r"""Attention-based descriptor which is proposed in the pretrainable DPA-1[1] model. + + This descriptor, :math:`\mathcal{D}^i \in \mathbb{R}^{M \times M_{<}}`, is given by + + .. math:: + \mathcal{D}^i = \frac{1}{N_c^2}(\hat{\mathcal{G}}^i)^T \mathcal{R}^i (\mathcal{R}^i)^T \hat{\mathcal{G}}^i_<, + + where :math:`\hat{\mathcal{G}}^i` represents the embedding matrix:math:`\mathcal{G}^i` + after additional self-attention mechanism and :math:`\mathcal{R}^i` is defined by the full case in the se_e2_a descriptor. + Note that we obtain :math:`\mathcal{G}^i` using the type embedding method by default in this descriptor. 
+ + To perform the self-attention mechanism, the queries :math:`\mathcal{Q}^{i,l} \in \mathbb{R}^{N_c\times d_k}`, + keys :math:`\mathcal{K}^{i,l} \in \mathbb{R}^{N_c\times d_k}`, + and values :math:`\mathcal{V}^{i,l} \in \mathbb{R}^{N_c\times d_v}` are first obtained: + + .. math:: + \left(\mathcal{Q}^{i,l}\right)_{j}=Q_{l}\left(\left(\mathcal{G}^{i,l-1}\right)_{j}\right), + + .. math:: + \left(\mathcal{K}^{i,l}\right)_{j}=K_{l}\left(\left(\mathcal{G}^{i,l-1}\right)_{j}\right), + + .. math:: + \left(\mathcal{V}^{i,l}\right)_{j}=V_{l}\left(\left(\mathcal{G}^{i,l-1}\right)_{j}\right), + + where :math:`Q_{l}`, :math:`K_{l}`, :math:`V_{l}` represent three trainable linear transformations + that output the queries and keys of dimension :math:`d_k` and values of dimension :math:`d_v`, and :math:`l` + is the index of the attention layer. + The input embedding matrix to the attention layers, denoted by :math:`\mathcal{G}^{i,0}`, + is chosen as the two-body embedding matrix. + + Then the scaled dot-product attention method is adopted: + + .. math:: + A(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l}, \mathcal{V}^{i,l}, \mathcal{R}^{i,l})=\varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l},\mathcal{R}^{i,l}\right)\mathcal{V}^{i,l}, + + where :math:`\varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l},\mathcal{R}^{i,l}\right) \in \mathbb{R}^{N_c\times N_c}` is attention weights. + In the original attention method, + one typically has :math:`\varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l}\right)=\mathrm{softmax}\left(\frac{\mathcal{Q}^{i,l} (\mathcal{K}^{i,l})^{T}}{\sqrt{d_{k}}}\right)`, + with :math:`\sqrt{d_{k}}` being the normalization temperature. + This is slightly modified to incorporate the angular information: + + .. 
math:: + \varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l},\mathcal{R}^{i,l}\right) = \mathrm{softmax}\left(\frac{\mathcal{Q}^{i,l} (\mathcal{K}^{i,l})^{T}}{\sqrt{d_{k}}}\right) \odot \hat{\mathcal{R}}^{i}(\hat{\mathcal{R}}^{i})^{T}, + + where :math:`\hat{\mathcal{R}}^{i} \in \mathbb{R}^{N_c\times 3}` denotes normalized relative coordinates, + :math:`\hat{\mathcal{R}}^{i}_{j} = \frac{\boldsymbol{r}_{ij}}{\lVert \boldsymbol{r}_{ij} \lVert}` + and :math:`\odot` means element-wise multiplication. + + Then layer normalization is added in a residual way to finally obtain the self-attention local embedding matrix + :math:`\hat{\mathcal{G}}^{i} = \mathcal{G}^{i,L_a}` after :math:`L_a` attention layers:[^1] + + .. math:: + \mathcal{G}^{i,l} = \mathcal{G}^{i,l-1} + \mathrm{LayerNorm}(A(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l}, \mathcal{V}^{i,l}, \mathcal{R}^{i,l})). + + Parameters + ---------- + rcut: float + The cut-off radius :math:`r_c` + rcut_smth: float + From where the environment matrix should be smoothed :math:`r_s` + sel : list[int], int + list[int]: sel[i] specifies the maxmum number of type i atoms in the cut-off radius + int: the total maxmum number of atoms in the cut-off radius + ntypes : int + Number of element types + neuron : list[int] + Number of neurons in each hidden layers of the embedding net :math:`\mathcal{N}` + axis_neuron: int + Number of the axis neuron :math:`M_2` (number of columns of the sub-matrix of the embedding matrix) + tebd_dim: int + Dimension of the type embedding + tebd_input_mode: str + The input mode of the type embedding. Supported modes are ["concat", "strip"]. + - "concat": Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. + - "strip": Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. 
+ resnet_dt: bool + Time-step `dt` in the resnet construction: + y = x + dt * \phi (Wx + b) + trainable: bool + If the weights of this descriptors are trainable. + trainable_ln: bool + Whether to use trainable shift and scale weights in layer normalization. + ln_eps: float, Optional + The epsilon value for layer normalization. + type_one_side: bool + If 'False', type embeddings of both neighbor and central atoms are considered. + If 'True', only type embeddings of neighbor atoms are considered. + Default is 'False'. + attn: int + Hidden dimension of the attention vectors + attn_layer: int + Number of attention layers + attn_dotr: bool + If dot the angular gate to the attention weights + attn_mask: bool + (Only support False to keep consistent with other backend references.) + (Not used in this version. True option is not implemented.) + If mask the diagonal of attention weights + exclude_types : list[list[int]] + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + env_protection: float + Protection parameter to prevent division by zero errors during environment matrix calculations. + set_davg_zero: bool + Set the shift of embedding net input to zero. + activation_function: str + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + precision: str + The precision of the embedding net parameters. Supported options are |PRECISION| + scaling_factor: float + The scaling factor of normalization in calculations of attention weights. + If `temperature` is None, the scaling of attention weights is (N_dim * scaling_factor)**0.5 + normalize: bool + Whether to normalize the hidden vectors in attention weights calculation. + temperature: float + If not None, the scaling of attention weights is `temperature` itself. + smooth_type_embedding: bool + Whether to use smooth process in attention weights calculation. 
+    concat_output_tebd: bool
+        Whether to concat type embedding at the output of the descriptor.
+    stripped_type_embedding: bool, Optional
+        (Deprecated, kept only for compatibility.)
+        Whether to strip the type embedding into a separate embedding network.
+        Setting this parameter to `True` is equivalent to setting `tebd_input_mode` to 'strip'.
+        Setting it to `False` is equivalent to setting `tebd_input_mode` to 'concat'.
+        The default value is `None`, which means the `tebd_input_mode` setting will be used instead.
+    seed: int, Optional
+        Random seed for parameter initialization.
+    use_econf_tebd: bool, Optional
+        Whether to use electronic configuration type embedding.
+    use_tebd_bias : bool, Optional
+        Whether to use bias in the type embedding layer.
+    type_map: list[str], Optional
+        A list of strings. Give the name to each type of atoms.
+    spin
+        (Only support None to keep consistent with other backend references.)
+        (Not used in this version. Not-none option is not implemented.)
+        The old implementation of deepspin.
+
+    Limitations
+    -----------
+    The current implementation does not support the following deprecated features
+    1. spin is not None
+    2. attn_mask == True
+
+    References
+    ----------
+    .. [1] Duo Zhang, Hangrui Bi, Fu-Zhi Dai, Wanrun Jiang, Linfeng Zhang, and Han Wang. 2022.
+       DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular Simulation.
+       arXiv preprint arXiv:2208.08236.
+ """ + + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [25, 50, 100], + axis_neuron: int = 16, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + set_davg_zero: bool = True, + attn: int = 128, + attn_layer: int = 2, + attn_dotr: bool = True, + attn_mask: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + scaling_factor: int = 1.0, + normalize=True, + temperature=None, + concat_output_tebd: bool = True, + trainable: bool = True, + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + smooth_type_embedding: bool = True, + type_one_side: bool = False, + stripped_type_embedding: Optional[bool] = None, + seed: Optional[Union[int, list[int]]] = None, + use_econf_tebd: bool = False, + use_tebd_bias: bool = False, + type_map: Optional[list[str]] = None, + # not implemented + spin=None, + type: Optional[str] = None, + ): + super().__init__() + # Ensure compatibility with the deprecated stripped_type_embedding option. + if stripped_type_embedding is not None: + # Use the user-set stripped_type_embedding parameter first + tebd_input_mode = "strip" if stripped_type_embedding else "concat" + if spin is not None: + raise NotImplementedError("old implementation of spin is not supported.") + if attn_mask: + raise NotImplementedError( + "old implementation of attn_mask is not supported." 
+ ) + # to keep consistent with default value in this backends + if ln_eps is None: + ln_eps = 1e-5 + + self.tebd_input_mode = tebd_input_mode + + del type, spin, attn_mask + self.se_atten = DescrptBlockSeAtten( + rcut, + rcut_smth, + sel, + ntypes, + neuron=neuron, + axis_neuron=axis_neuron, + tebd_dim=tebd_dim, + tebd_input_mode=tebd_input_mode, + set_davg_zero=set_davg_zero, + attn=attn, + attn_layer=attn_layer, + attn_dotr=attn_dotr, + attn_mask=False, + activation_function=activation_function, + precision=precision, + resnet_dt=resnet_dt, + scaling_factor=scaling_factor, + normalize=normalize, + temperature=temperature, + smooth=smooth_type_embedding, + type_one_side=type_one_side, + exclude_types=exclude_types, + env_protection=env_protection, + trainable_ln=trainable_ln, + ln_eps=ln_eps, + seed=child_seed(seed, 1), + ) + self.use_econf_tebd = use_econf_tebd + self.use_tebd_bias = use_tebd_bias + self.type_map = type_map + self.compress = False + self.type_embedding = TypeEmbedNet( + ntypes, + tebd_dim, + precision=precision, + seed=child_seed(seed, 2), + use_econf_tebd=use_econf_tebd, + use_tebd_bias=use_tebd_bias, + type_map=type_map, + ) + self.tebd_dim = tebd_dim + self.concat_output_tebd = concat_output_tebd + self.trainable = trainable + # set trainable + for param in self.parameters(): + param.stop_gradient = not trainable + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.se_atten.get_rcut() + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.se_atten.get_rcut_smth() + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return self.se_atten.get_nsel() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.se_atten.get_sel() + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return 
self.se_atten.get_ntypes() + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + ret = self.se_atten.get_dim_out() + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + def get_dim_emb(self) -> int: + return self.se_atten.dim_emb + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return self.se_atten.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.se_atten.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return self.se_atten.need_sorted_nlist_for_lower() + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.se_atten.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" 
+ # For DPA1 descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in both type_embedding and se_atten + if shared_level == 0: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.se_atten.share_params(base_class.se_atten, 0, resume=resume) + # shared_level: 1 + # share all parameters in type_embedding + elif shared_level == 1: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + # Other shared levels + else: + raise NotImplementedError + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + return self.se_atten.compute_input_stats(merged, path) + + def set_stat_mean_and_stddev( + self, + mean: paddle.Tensor, + stddev: paddle.Tensor, + ) -> None: + """Update mean and stddev for descriptor.""" + self.se_atten.mean = mean + self.se_atten.stddev = stddev + + def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]: + """Get mean and stddev for descriptor.""" + return self.se_atten.mean, self.se_atten.stddev + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + obj = self.se_atten + obj.ntypes = len(type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + obj.reinit_exclude(map_pair_exclude_types(obj.exclude_types, remap_index)) + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + obj, + type_map, + des_with_stat=model_with_new_type_stat.se_atten + if model_with_new_type_stat is not None + else None, + ) + obj["davg"] = obj["davg"][remap_index] + obj["dstd"] = obj["dstd"][remap_index] + + def serialize(self) -> dict: + obj = self.se_atten + data = { + "@class": "Descriptor", + "type": "dpa1", + "@version": 2, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "ntypes": obj.ntypes, + "neuron": obj.neuron, + "axis_neuron": obj.axis_neuron, + "tebd_dim": obj.tebd_dim, + "tebd_input_mode": obj.tebd_input_mode, + "set_davg_zero": obj.set_davg_zero, + "attn": obj.attn_dim, + "attn_layer": obj.attn_layer, + "attn_dotr": obj.attn_dotr, + "attn_mask": False, + 
"activation_function": obj.activation_function, + "resnet_dt": obj.resnet_dt, + "scaling_factor": obj.scaling_factor, + "normalize": obj.normalize, + "temperature": obj.temperature, + "trainable_ln": obj.trainable_ln, + "ln_eps": obj.ln_eps, + "smooth_type_embedding": obj.smooth, + "type_one_side": obj.type_one_side, + "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": self.use_econf_tebd, + "use_tebd_bias": self.use_tebd_bias, + "type_map": self.type_map, + # make deterministic + "precision": RESERVED_PRECISON_DICT[obj.prec], + "embeddings": obj.filter_layers.serialize(), + "attention_layers": obj.dpa1_attention.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "type_embedding": self.type_embedding.embedding.serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "@variables": { + "davg": obj["davg"].numpy(), + "dstd": obj["dstd"].numpy(), + }, + "trainable": self.trainable, + "spin": None, + } + if obj.tebd_input_mode in ["strip"]: + data.update({"embeddings_strip": obj.filter_layers_strip.serialize()}) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptDPA1": + data = data.copy() + check_version_compatibility(data.pop("@version"), 2, 1) + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + type_embedding = data.pop("type_embedding") + attention_layers = data.pop("attention_layers") + env_mat = data.pop("env_mat") + tebd_input_mode = data["tebd_input_mode"] + if tebd_input_mode in ["strip"]: + embeddings_strip = data.pop("embeddings_strip") + else: + embeddings_strip = None + # compat with version 1 + if "use_tebd_bias" not in data: + data["use_tebd_bias"] = True + obj = cls(**data) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.se_atten.prec).to(device=env.DEVICE) + + obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize( + type_embedding + ) + obj.se_atten["davg"] = 
t_cvt(variables["davg"])
+        obj.se_atten["dstd"] = t_cvt(variables["dstd"])
+        obj.se_atten.filter_layers = NetworkCollection.deserialize(embeddings)
+        if tebd_input_mode in ["strip"]:
+            obj.se_atten.filter_layers_strip = NetworkCollection.deserialize(
+                embeddings_strip
+            )
+        obj.se_atten.dpa1_attention = NeighborGatedAttention.deserialize(
+            attention_layers
+        )
+        return obj
+
+    def enable_compression(
+        self,
+        min_nbor_dist: float,
+        table_extrapolate: float = 5,
+        table_stride_1: float = 0.01,
+        table_stride_2: float = 0.1,
+        check_frequency: int = -1,
+    ) -> None:
+        """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data.
+
+        Parameters
+        ----------
+        min_nbor_dist
+            The nearest distance between atoms
+        table_extrapolate
+            The scale of model extrapolation
+        table_stride_1
+            The uniform stride of the first table
+        table_stride_2
+            The uniform stride of the second table
+        check_frequency
+            The overflow check frequency
+        """
+        # do some checks before the model compression process
+        raise NotImplementedError("Model compression is not supported in paddle yet.")
+
+    def forward(
+        self,
+        extended_coord: paddle.Tensor,
+        extended_atype: paddle.Tensor,
+        nlist: paddle.Tensor,
+        mapping: Optional[paddle.Tensor] = None,
+        comm_dict: Optional[dict[str, paddle.Tensor]] = None,
+    ):
+        """Compute the descriptor.
+
+        Parameters
+        ----------
+        extended_coord
+            The extended coordinates of atoms. shape: nf x (nallx3)
+        extended_atype
+            The extended atom types. shape: nf x nall
+        nlist
+            The neighbor list. shape: nf x nloc x nnei
+        mapping
+            The index mapping, not required by this descriptor.
+        comm_dict
+            The data needed for communication for parallel inference.
+
+        Returns
+        -------
+        descriptor
+            The descriptor. shape: nf x nloc x (ng x axis_neuron)
+        gr
+            The rotationally equivariant and permutationally invariant single particle
+            representation. shape: nf x nloc x ng x 3
+        g2
+            The rotationally invariant pair-particle representation.
+ shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + del mapping + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + g1, g2, h2, rot_mat, sw = self.se_atten( + nlist, + extended_coord, + extended_atype, + g1_ext, + mapping=None, + ) + if self.concat_output_tebd: + g1 = paddle.concat([g1, g1_inp], axis=-1) + + return g1, rot_mat, g2, h2, sw + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + min_nbor_dist, sel = UpdateSel().update_one_sel( + train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], True + ) + local_jdata_cpy["sel"] = sel[0] + return local_jdata_cpy, min_nbor_dist diff --git a/deepmd/pd/model/descriptor/se_atten.py b/deepmd/pd/model/descriptor/se_atten.py new file mode 100644 index 0000000000..89407e6923 --- /dev/null +++ b/deepmd/pd/model/descriptor/se_atten.py @@ -0,0 +1,1019 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle +import paddle.nn as nn +import paddle.nn.functional as paddle_func + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor.descriptor import ( + DescriptorBlock, +) +from 
deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.model.network.layernorm import ( + LayerNorm, +) +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, + MLPLayer, + NetworkCollection, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + + +@DescriptorBlock.register("se_atten") +class DescrptBlockSeAtten(DescriptorBlock): + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [25, 50, 100], + axis_neuron: int = 16, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + set_davg_zero: bool = True, + attn: int = 128, + attn_layer: int = 2, + attn_dotr: bool = True, + attn_mask: bool = False, + activation_function="tanh", + precision: str = "float64", + resnet_dt: bool = False, + scaling_factor=1.0, + normalize=True, + temperature=None, + smooth: bool = True, + type_one_side: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + seed: Optional[Union[int, list[int]]] = None, + type: Optional[str] = None, + ): + r"""Construct an embedding net of type `se_atten`. 
+
+        Parameters
+        ----------
+        rcut : float
+            The cut-off radius :math:`r_c`
+        rcut_smth : float
+            From where the environment matrix should be smoothed :math:`r_s`
+        sel : list[int], int
+            list[int]: sel[i] specifies the maximum number of type i atoms in the cut-off radius
+            int: the total maximum number of atoms in the cut-off radius
+        ntypes : int
+            Number of element types
+        neuron : list[int]
+            Number of neurons in each hidden layer of the embedding net :math:`\mathcal{N}`
+        axis_neuron : int
+            Number of the axis neuron :math:`M_2` (number of columns of the sub-matrix of the embedding matrix)
+        tebd_dim : int
+            Dimension of the type embedding
+        tebd_input_mode : str
+            The input mode of the type embedding. Supported modes are ["concat", "strip"].
+            - "concat": Concatenate the type embedding with the smoothed radial information as the union input for the embedding network.
+            - "strip": Use a separate embedding network for the type embedding and combine the output with the radial embedding network output.
+        resnet_dt : bool
+            Time-step `dt` in the resnet construction:
+            y = x + dt * \phi (Wx + b)
+        trainable_ln : bool
+            Whether to use trainable shift and scale weights in layer normalization.
+        ln_eps : float, Optional
+            The epsilon value for layer normalization.
+        type_one_side : bool
+            If 'False', type embeddings of both neighbor and central atoms are considered.
+            If 'True', only type embeddings of neighbor atoms are considered.
+            Default is 'False'.
+        attn : int
+            Hidden dimension of the attention vectors
+        attn_layer : int
+            Number of attention layers
+        attn_dotr : bool
+            If dot the angular gate to the attention weights
+        attn_mask : bool
+            (Only support False to keep consistent with other backend references.)
+            (Not used in this version.)
+            If mask the diagonal of attention weights
+        exclude_types : list[list[int]]
+            The excluded pairs of types which have no interaction with each other.
+            For example, `[[0, 1]]` means no interaction between type 0 and type 1.
+ env_protection : float + Protection parameter to prevent division by zero errors during environment matrix calculations. + set_davg_zero : bool + Set the shift of embedding net input to zero. + activation_function : str + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + precision : str + The precision of the embedding net parameters. Supported options are |PRECISION| + scaling_factor : float + The scaling factor of normalization in calculations of attention weights. + If `temperature` is None, the scaling of attention weights is (N_dim * scaling_factor)**0.5 + normalize : bool + Whether to normalize the hidden vectors in attention weights calculation. + temperature : float + If not None, the scaling of attention weights is `temperature` itself. + seed : int, Optional + Random seed for parameter initialization. + """ + super().__init__() + del type + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.neuron = neuron + self.filter_neuron = self.neuron + self.axis_neuron = axis_neuron + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.set_davg_zero = set_davg_zero + self.attn_dim = attn + self.attn_layer = attn_layer + self.attn_dotr = attn_dotr + self.attn_mask = attn_mask + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.resnet_dt = resnet_dt + self.scaling_factor = scaling_factor + self.normalize = normalize + self.temperature = temperature + self.smooth = smooth + self.type_one_side = type_one_side + self.env_protection = env_protection + self.trainable_ln = trainable_ln + self.seed = seed + # to keep consistent with default value in this backends + if ln_eps is None: + ln_eps = 1e-5 + self.ln_eps = ln_eps + + if isinstance(sel, int): + sel = [sel] + + self.ntypes = ntypes + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + # order 
matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + + self.dpa1_attention = NeighborGatedAttention( + self.attn_layer, + self.nnei, + self.filter_neuron[-1], + self.attn_dim, + dotr=self.attn_dotr, + do_mask=self.attn_mask, + scaling_factor=self.scaling_factor, + normalize=self.normalize, + temperature=self.temperature, + trainable_ln=self.trainable_ln, + ln_eps=self.ln_eps, + smooth=self.smooth, + precision=self.precision, + seed=child_seed(self.seed, 0), + ) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + stddev = paddle.ones(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.tebd_dim_input = self.tebd_dim if self.type_one_side else self.tebd_dim * 2 + if self.tebd_input_mode in ["concat"]: + self.embd_input_dim = 1 + self.tebd_dim_input + else: + self.embd_input_dim = 1 + + self.filter_layers_strip = None + filter_layers = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers[0] = EmbeddingNet( + self.embd_input_dim, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 1), + ) + self.filter_layers = filter_layers + if self.tebd_input_mode in ["strip"]: + filter_layers_strip = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers_strip[0] = EmbeddingNet( + self.tebd_dim_input, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 2), + ) + self.filter_layers_strip = filter_layers_strip + self.stats = None + + # add for compression + self.compress = False + self.is_sorted = False + # self.compress_info = nn.ParameterList( 
+ # [self.create_parameter([0], dtype=self.prec).to("cpu")] + # ) + # self.compress_data = nn.ParameterList( + # [self.create_parameter([0], dtype=self.prec).to(env.DEVICE)] + # ) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_rot_mat_1(self) -> int: + """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" + return self.filter_neuron[-1] + + def get_dim_emb(self) -> int: + """Returns the output dimension of embedding.""" + return self.filter_neuron[-1] + + def __setitem__(self, key, value): + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. 
+ + """ + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] * self.axis_neuron + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.tebd_dim + + @property + def dim_emb(self): + """Returns the output dimension of embedding.""" + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." + ) + return self.stats + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ): + self.exclude_types = exclude_types + self.is_sorted = len(self.exclude_types) == 0 + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + raise NotImplementedError( + "Compressed descriptor in paddle is not supported yet." + ) + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + nlist + The neighbor list. shape: nf x nloc x nnei + extended_coord + The extended coordinates of atoms. shape: nf x (nallx3) + extended_atype + The extended aotm types. shape: nf x nall x nt + extended_atype_embd + The extended type embedding of atoms. shape: nf x nall + mapping + The index mapping, not required by this descriptor. + + Returns + ------- + result + The descriptor. 
shape: nf x nloc x (ng x axis_neuron) + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + del mapping + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + nb = nframes + nall = extended_coord.reshape([nb, -1, 3]).shape[1] + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + # nb x nloc x nnei + exclude_mask = self.emask(nlist, extended_atype) + nlist = paddle.where(exclude_mask != 0, nlist, paddle.full_like(nlist, -1)) + nlist_mask = nlist != -1 + nlist = paddle.where(nlist == -1, paddle.zeros_like(nlist), nlist) + sw = paddle.squeeze(sw, -1) + # nf x nloc x nt -> nf x nloc x nnei x nt + atype_tebd = extended_atype_embd[:, :nloc, :] + atype_tebd_nnei = atype_tebd.unsqueeze(2).expand([-1, -1, self.nnei, -1]) + # nf x nall x nt + nt = extended_atype_embd.shape[-1] + atype_tebd_ext = extended_atype_embd + # nb x (nloc x nnei) x nt + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) + # nb x (nloc x nnei) x nt + # atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, index=index) + atype_tebd_nlist = decomp.take_along_axis(atype_tebd_ext, axis=1, indices=index) + # nb x nloc x nnei x nt + atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + # (nb x nloc) x nnei + exclude_mask = exclude_mask.reshape([nb * nloc, nnei]) + + # nfnl x nnei x 4 + dmatrix = dmatrix.reshape([-1, self.nnei, 4]) + nfnl = dmatrix.shape[0] + # nfnl x nnei x 4 + rr 
= dmatrix + rr = rr * exclude_mask[:, :, None].astype(rr.dtype) + ss = rr[:, :, :1] + nlist_tebd = atype_tebd_nlist.reshape([nfnl, nnei, self.tebd_dim]) + atype_tebd = atype_tebd_nnei.reshape([nfnl, nnei, self.tebd_dim]) + if self.tebd_input_mode in ["concat"]: + if not self.type_one_side: + # nfnl x nnei x (1 + tebd_dim * 2) + ss = paddle.concat([ss, nlist_tebd, atype_tebd], axis=2) + else: + # nfnl x nnei x (1 + tebd_dim) + ss = paddle.concat([ss, nlist_tebd], axis=2) + # nfnl x nnei x ng + gg = self.filter_layers.networks[0](ss) + input_r = paddle.nn.functional.normalize( + rr.reshape([-1, self.nnei, 4])[:, :, 1:4], axis=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.neei, out_size] + # nfnl x 4 x ng + xyz_scatter = paddle.matmul(rr.transpose([0, 2, 1]), gg) + elif self.tebd_input_mode in ["strip"]: + if self.compress: + raise NotImplementedError("Compression is not implemented yet.") + else: + # nfnl x nnei x ng + gg_s = self.filter_layers.networks[0](ss) + assert self.filter_layers_strip is not None + if not self.type_one_side: + # nfnl x nnei x (tebd_dim * 2) + tt = paddle.concat( + [nlist_tebd, atype_tebd], axis=2 + ) # dynamic, index + else: + # nfnl x nnei x tebd_dim + tt = nlist_tebd + # nfnl x nnei x ng + gg_t = self.filter_layers_strip.networks[0](tt) + if self.smooth: + gg_t = gg_t * sw.reshape([-1, self.nnei, 1]) + # nfnl x nnei x ng + gg = gg_s * gg_t + gg_s + input_r = decomp.normalize( + rr.reshape([-1, self.nnei, 4])[:, :, 1:4], axis=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.neei, out_size] + # nfnl x 4 x ng + xyz_scatter = paddle.matmul(rr.transpose([0, 2, 1]), gg) + else: + raise NotImplementedError + + xyz_scatter = xyz_scatter / self.nnei + xyz_scatter_1 = xyz_scatter.transpose([0, 2, 1]) + rot_mat = xyz_scatter_1[:, :, 1:4] + xyz_scatter_2 = xyz_scatter[:, :, 0 : self.axis_neuron] + result = paddle.matmul( + 
xyz_scatter_1, xyz_scatter_2 + ) # shape is [nframes*nloc, self.filter_neuron[-1], self.axis_neuron] + + return ( + result.reshape([nframes, nloc, self.filter_neuron[-1] * self.axis_neuron]), + gg.reshape([nframes, nloc, self.nnei, self.filter_neuron[-1]]) + if not self.compress + else None, + dmatrix.reshape([nframes, nloc, self.nnei, 4])[..., 1:], + rot_mat.reshape([nframes, nloc, self.filter_neuron[-1], 3]), + sw, + ) + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return False + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False + + +class NeighborGatedAttention(nn.Layer): + def __init__( + self, + layer_num: int, + nnei: int, + embed_dim: int, + hidden_dim: int, + dotr: bool = False, + do_mask: bool = False, + scaling_factor: float = 1.0, + normalize: bool = True, + temperature: Optional[float] = None, + trainable_ln: bool = True, + ln_eps: float = 1e-5, + smooth: bool = True, + precision: str = DEFAULT_PRECISION, + seed: Optional[Union[int, list[int]]] = None, + ): + """Construct a neighbor-wise attention net.""" + super().__init__() + self.layer_num = layer_num + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.dotr = dotr + self.do_mask = do_mask + self.scaling_factor = scaling_factor + self.normalize = normalize + self.temperature = temperature + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.smooth = smooth + self.precision = precision + self.seed = seed + self.network_type = NeighborGatedAttentionLayer + attention_layers = [] + for i in range(self.layer_num): + attention_layers.append( + NeighborGatedAttentionLayer( + nnei, + embed_dim, + hidden_dim, + dotr=dotr, + do_mask=do_mask, + scaling_factor=scaling_factor, + normalize=normalize, + temperature=temperature, + trainable_ln=trainable_ln, + ln_eps=ln_eps, + smooth=smooth, + 
precision=precision, + seed=child_seed(seed, i), + ) + ) + self.attention_layers = nn.LayerList(attention_layers) + + def forward( + self, + input_G, + nei_mask, + input_r: Optional[paddle.Tensor] = None, + sw: Optional[paddle.Tensor] = None, + ): + """Compute the multi-layer gated self-attention. + + Parameters + ---------- + input_G + inputs with shape: (nf x nloc) x nnei x embed_dim. + nei_mask + neighbor mask, with paddings being 0. shape: (nf x nloc) x nnei. + input_r + normalized radial. shape: (nf x nloc) x nnei x 3. + sw + The smooth switch function. shape: nf x nloc x nnei + """ + out = input_G + for layer in self.attention_layers: + out = layer(out, nei_mask, input_r=input_r, sw=sw) + return out + + def __getitem__(self, key): + if isinstance(key, int): + return self.attention_layers[key] + else: + raise TypeError(key) + + def __setitem__(self, key, value): + if not isinstance(key, int): + raise TypeError(key) + if isinstance(value, self.network_type): + pass + elif isinstance(value, dict): + value = self.network_type.deserialize(value) + else: + raise TypeError(value) + self.attention_layers[key] = value + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "NeighborGatedAttention", + "@version": 1, + "layer_num": self.layer_num, + "nnei": self.nnei, + "embed_dim": self.embed_dim, + "hidden_dim": self.hidden_dim, + "dotr": self.dotr, + "do_mask": self.do_mask, + "scaling_factor": self.scaling_factor, + "normalize": self.normalize, + "temperature": self.temperature, + "trainable_ln": self.trainable_ln, + "ln_eps": self.ln_eps, + "precision": self.precision, + "attention_layers": [layer.serialize() for layer in self.attention_layers], + } + + @classmethod + def deserialize(cls, data: dict) -> "NeighborGatedAttention": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + attention_layers = data.pop("attention_layers") + obj = cls(**data) + for ii, network in enumerate(attention_layers): + obj[ii] = network + return obj + + +class NeighborGatedAttentionLayer(nn.Layer): + def __init__( + self, + nnei: int, + embed_dim: int, + hidden_dim: int, + dotr: bool = False, + do_mask: bool = False, + scaling_factor: float = 1.0, + normalize: bool = True, + temperature: Optional[float] = None, + smooth: bool = True, + trainable_ln: bool = True, + ln_eps: float = 1e-5, + precision: str = DEFAULT_PRECISION, + seed: Optional[Union[int, list[int]]] = None, + ): + """Construct a neighbor-wise attention layer.""" + super().__init__() + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.dotr = dotr + self.do_mask = do_mask + self.scaling_factor = scaling_factor + self.normalize = normalize + self.temperature = temperature + self.precision = precision + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.seed = seed + self.attention_layer = GatedAttentionLayer( + nnei, + embed_dim, + hidden_dim, + dotr=dotr, + do_mask=do_mask, + scaling_factor=scaling_factor, + normalize=normalize, + temperature=temperature, + smooth=smooth, + precision=precision, + seed=child_seed(seed, 0), + ) + self.attn_layer_norm = LayerNorm( + self.embed_dim, + eps=ln_eps, + trainable=trainable_ln, + precision=precision, + seed=child_seed(seed, 1), + ) + + def forward( + self, + x, + nei_mask, + input_r: Optional[paddle.Tensor] = None, + sw: Optional[paddle.Tensor] = None, + ): + residual = x + x, _ = self.attention_layer(x, nei_mask, input_r=input_r, sw=sw) + x = residual + x + x = self.attn_layer_norm(x) + return x + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. 
+ """ + return { + "nnei": self.nnei, + "embed_dim": self.embed_dim, + "hidden_dim": self.hidden_dim, + "dotr": self.dotr, + "do_mask": self.do_mask, + "scaling_factor": self.scaling_factor, + "normalize": self.normalize, + "temperature": self.temperature, + "trainable_ln": self.trainable_ln, + "ln_eps": self.ln_eps, + "precision": self.precision, + "attention_layer": self.attention_layer.serialize(), + "attn_layer_norm": self.attn_layer_norm.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "NeighborGatedAttentionLayer": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + attention_layer = data.pop("attention_layer") + attn_layer_norm = data.pop("attn_layer_norm") + obj = cls(**data) + obj.attention_layer = GatedAttentionLayer.deserialize(attention_layer) + obj.attn_layer_norm = LayerNorm.deserialize(attn_layer_norm) + return obj + + +class GatedAttentionLayer(nn.Layer): + def __init__( + self, + nnei: int, + embed_dim: int, + hidden_dim: int, + num_heads: int = 1, + dotr: bool = False, + do_mask: bool = False, + scaling_factor: float = 1.0, + normalize: bool = True, + temperature: Optional[float] = None, + bias: bool = True, + smooth: bool = True, + precision: str = DEFAULT_PRECISION, + seed: Optional[Union[int, list[int]]] = None, + ): + """Construct a multi-head neighbor-wise attention net.""" + super().__init__() + assert hidden_dim % num_heads == 0, "hidden_dim must be divisible by num_heads" + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.num_heads = num_heads + self.head_dim = hidden_dim // num_heads + self.dotr = dotr + self.do_mask = do_mask + self.bias = bias + self.smooth = smooth + self.scaling_factor = scaling_factor + self.temperature = temperature + self.precision = precision + self.seed = seed + self.scaling = ( + (self.head_dim * scaling_factor) ** -0.5 + if temperature is None + else temperature 
+ ) + self.normalize = normalize + self.in_proj = MLPLayer( + embed_dim, + hidden_dim * 3, + bias=bias, + use_timestep=False, + bavg=0.0, + stddev=1.0, + precision=precision, + seed=child_seed(seed, 0), + ) + self.out_proj = MLPLayer( + hidden_dim, + embed_dim, + bias=bias, + use_timestep=False, + bavg=0.0, + stddev=1.0, + precision=precision, + seed=child_seed(seed, 1), + ) + + def forward( + self, + query, + nei_mask, + input_r: Optional[paddle.Tensor] = None, + sw: Optional[paddle.Tensor] = None, + attnw_shift: float = 20.0, + ): + """Compute the multi-head gated self-attention. + + Parameters + ---------- + query + inputs with shape: (nf x nloc) x nnei x embed_dim. + nei_mask + neighbor mask, with paddings being 0. shape: (nf x nloc) x nnei. + input_r + normalized radial. shape: (nf x nloc) x nnei x 3. + sw + The smooth switch function. shape: (nf x nloc) x nnei + attnw_shift : float + The attention weight shift to preserve smoothness when doing padding before softmax. + """ + q, k, v = self.in_proj(query).chunk(3, axis=-1) + + # Reshape for multi-head attention: (nf x nloc) x num_heads x nnei x head_dim + q = q.reshape([-1, self.nnei, self.num_heads, self.head_dim]).transpose( + [0, 2, 1, 3] + ) + k = k.reshape([-1, self.nnei, self.num_heads, self.head_dim]).transpose( + [0, 2, 1, 3] + ) + v = v.reshape([-1, self.nnei, self.num_heads, self.head_dim]).transpose( + [0, 2, 1, 3] + ) + + if self.normalize: + q = paddle_func.normalize(q, axis=-1) + k = paddle_func.normalize(k, axis=-1) + v = paddle_func.normalize(v, axis=-1) + + q = q * self.scaling + # (nf x nloc) x num_heads x head_dim x nnei + k = k.transpose([0, 1, 3, 2]) + + # Compute attention scores + # (nf x nloc) x num_heads x nnei x nnei + attn_weights = paddle.matmul(q, k) + # (nf x nloc) x nnei + nei_mask = nei_mask.reshape([-1, self.nnei]) + + if self.smooth: + assert sw is not None + # (nf x nloc) x 1 x nnei + sw = sw.reshape([-1, 1, self.nnei]) + attn_weights = (attn_weights + attnw_shift) * sw[:, :, 
:, None] * sw[ + :, :, None, : + ] - attnw_shift + else: + # (nf x nloc) x 1 x 1 x nnei + attn_weights = attn_weights.masked_fill( + ~nei_mask.unsqueeze(1).unsqueeze(1), float("-inf") + ) + + attn_weights = paddle_func.softmax(attn_weights, axis=-1) + attn_weights = attn_weights.masked_fill( + ~nei_mask.unsqueeze(1).unsqueeze(-1), 0.0 + ) + if self.smooth: + assert sw is not None + attn_weights = attn_weights * sw[:, :, :, None] * sw[:, :, None, :] + + if self.dotr: + # (nf x nloc) x nnei x 3 + assert input_r is not None, "input_r must be provided when dotr is True!" + # (nf x nloc) x 1 x nnei x nnei + angular_weight = paddle.matmul( + input_r, input_r.transpose([0, 2, 1]) + ).reshape([-1, 1, self.nnei, self.nnei]) + attn_weights = attn_weights * angular_weight + + # Apply attention to values + # (nf x nloc) x nnei x (num_heads x head_dim) + o = ( + paddle.matmul(attn_weights, v) + .transpose([0, 2, 1, 3]) + .reshape([-1, self.nnei, self.hidden_dim]) + ) + output = self.out_proj(o) + return output, attn_weights + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "nnei": self.nnei, + "embed_dim": self.embed_dim, + "hidden_dim": self.hidden_dim, + "num_heads": self.num_heads, + "dotr": self.dotr, + "do_mask": self.do_mask, + "scaling_factor": self.scaling_factor, + "normalize": self.normalize, + "temperature": self.temperature, + "bias": self.bias, + "smooth": self.smooth, + "precision": self.precision, + "in_proj": self.in_proj.serialize(), + "out_proj": self.out_proj.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "GatedAttentionLayer": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + in_proj = data.pop("in_proj") + out_proj = data.pop("out_proj") + obj = cls(**data) + obj.in_proj = MLPLayer.deserialize(in_proj) + obj.out_proj = MLPLayer.deserialize(out_proj) + return obj diff --git a/deepmd/pd/model/network/layernorm.py b/deepmd/pd/model/network/layernorm.py new file mode 100644 index 0000000000..4d37b208f9 --- /dev/null +++ b/deepmd/pd/model/network/layernorm.py @@ -0,0 +1,165 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle +import paddle.nn as nn + +from deepmd.dpmodel.utils.network import LayerNorm as DPLayerNorm +from deepmd.pd.model.network.init import ( + normal_, + ones_, + zeros_, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.utils import ( + get_generator, + to_numpy_array, + to_paddle_tensor, +) + +device = env.DEVICE + + +def empty_t(shape, precision): + return paddle.empty(shape, dtype=precision).to(device=device) + + +class LayerNorm(nn.Layer): + def __init__( + self, + num_in, + eps: float = 1e-5, + uni_init: bool = True, + bavg: float = 0.0, + stddev: float = 1.0, + precision: str = DEFAULT_PRECISION, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ): + super().__init__() + self.eps = eps + self.uni_init = uni_init + self.num_in = num_in + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.matrix = self.create_parameter( + shape=[num_in], + dtype=self.prec, + default_initializer=nn.initializer.Assign( + empty_t((num_in,), self.prec), + ), + ) + self.bias = self.create_parameter( + shape=[num_in], + dtype=self.prec, + default_initializer=nn.initializer.Assign(empty_t([num_in], self.prec)), + ) + random_generator = get_generator(seed) + if self.uni_init: + ones_(self.matrix.data) + zeros_(self.bias.data) + else: + normal_(self.bias.data, mean=bavg, std=stddev, 
generator=random_generator) + normal_( + self.matrix.data, + std=stddev / np.sqrt(self.num_in), + generator=random_generator, + ) + self.trainable = trainable + if not self.trainable: + self.matrix.stop_gradient = True + self.bias.stop_gradient = True + + def dim_out(self) -> int: + return self.matrix.shape[0] + + def forward( + self, + xx: paddle.Tensor, + ) -> paddle.Tensor: + """One Layer Norm used by DP model. + + Parameters + ---------- + xx : paddle.Tensor + The input of index. + + Returns + ------- + yy: paddle.Tensor + The output. + """ + # if xx.numel() > 0: + if decomp.numel(xx): + variance, mean = ( + paddle.var(xx, axis=-1, unbiased=False, keepdim=True), + paddle.mean(xx, axis=-1, keepdim=True), + ) + yy = (xx - mean) / paddle.sqrt(variance + self.eps) + else: + yy = xx + if self.matrix is not None and self.bias is not None: + yy = yy * self.matrix + self.bias + return yy + + def serialize(self) -> dict: + """Serialize the layer to a dict. + + Returns + ------- + dict + The serialized layer. + """ + nl = DPLayerNorm( + self.matrix.shape[0], + eps=self.eps, + trainable=self.trainable, + precision=self.precision, + ) + nl.w = to_numpy_array(self.matrix) + nl.b = to_numpy_array(self.bias) + data = nl.serialize() + return data + + @classmethod + def deserialize(cls, data: dict) -> "LayerNorm": + """Deserialize the layer from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + nl = DPLayerNorm.deserialize(data) + obj = cls( + nl["matrix"].shape[0], + eps=nl["eps"], + trainable=nl["trainable"], + precision=nl["precision"], + ) + prec = PRECISION_DICT[obj.precision] + + def check_load_param(ss): + if nl[ss] is not None: + tensor = to_paddle_tensor(nl[ss]) + return paddle.create_parameter( + tensor.shape, + dtype=tensor.dtype, + default_initializer=nn.initializer.Assign(tensor), + ) + return None + + obj.matrix = check_load_param("matrix") + obj.bias = check_load_param("bias") + return obj diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index c22f60b847..e504e3fd65 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -658,6 +658,7 @@ def step(_step_id, task_key="Default"): # Paddle Profiler if enable_profiling: core.nvprof_nvtx_push(f"Training step {_step_id}") + self.wrapper.train() if isinstance(self.lr_exp, dict): _lr = self.lr_exp[task_key] @@ -704,8 +705,6 @@ def step(_step_id, task_key="Default"): self.scheduler.step() - if enable_profiling: - core.nvprof_nvtx_pop() else: raise ValueError(f"Not supported optimizer type '{self.opt_type}'") @@ -879,6 +878,9 @@ def log_loss_valid(_task_key="Default"): f"{task_key}/{item}", more_loss[item].item(), _step_id ) + if enable_profiling: + core.nvprof_nvtx_pop() + self.t0 = time.time() self.total_train_time = 0.0 for step_id in range(self.num_steps): diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 434301441a..42f3b9c7d1 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py @@ -10,39 +10,19 @@ annotations, ) +import numpy as np import paddle __all__ = [ - "softmax", "norm", "take_along_axis", "scatter_reduce", "sec", "masked_add_", + "numel", ] -# decomposition for forward function -def softmax_decomp(x: paddle.Tensor, axis: int = -1) -> paddle.Tensor: - """Forward decompsition function of softmax. - - Parameters - ---------- - x : paddle.Tensor - Input. - axis : int, defaults: -1. 
- A dimension along which softmax will be computed. - - Returns - ------- - paddle.Tensor - Computed output. - """ - x_max = paddle.max(x, axis=axis, keepdim=True) - x = x - x_max - return paddle.exp(x) / paddle.sum(paddle.exp(x), axis=axis, keepdim=True) - - def norm_decomp( x: paddle.Tensor, p: float = 2, axis: bool = -1, keepdim: bool = False ) -> paddle.Tensor: @@ -65,10 +45,7 @@ def norm_decomp( paddle.Tensor A real-valued tensor, even when A is complex. """ - if p == 2 or p == 2.0: - # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward - return (x * x).sum(axis=axis, keepdim=keepdim) ** 0.5 - return (x.abs() ** p).sum(axis=axis, keepdim=keepdim) ** (1 / p) + return paddle.linalg.norm(x, p=p, axis=axis, keepdim=keepdim) def take_along_axis_decomp( @@ -92,16 +69,7 @@ def take_along_axis_decomp( paddle.Tensor Computed output. """ - # manually contruct indices for gather_nd(ind_gather_nd.ndim == indices.ndim + 1, - # the lsat 1 represents the number of dimension(s) of indices) - ind_gather_nd = paddle.stack( - paddle.meshgrid(*[paddle.arange(v) for v in indices.shape], indexing="ij"), - axis=-1, - ) - ind_gather_nd[..., axis] = indices - # compute output using constructed indices via gather_nd - out = paddle.gather_nd(x, ind_gather_nd) - return out + return paddle.take_along_axis(x, indices, axis, broadcast) def scatter_reduce_decomp( @@ -235,7 +203,13 @@ def normalize_decomp( Computed output. 
""" return paddle.nn.functional.normalize(x, p, axis, epsilon) - # return x / norm(x, p=p, axis=axis, keepdim=True) + + +def numel(x: paddle.Tensor) -> int: + if paddle.in_dynamic_mode(): + return np.prod(x.shape) + + return paddle.numel(x) # alias for decomposed functions for convinience @@ -244,4 +218,3 @@ def normalize_decomp( scatter_reduce = scatter_reduce_decomp take_along_axis = take_along_axis_decomp norm = norm_decomp -softmax = softmax_decomp diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 4c104db374..1a359b3573 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -77,13 +77,75 @@ def enable_prim(enable: bool = True): + # operator in list below will not use composite + # operator but kernel instead + EAGER_COMP_OP_BLACK_LIST = [ + "abs_grad", + "cast_grad", + "concat_grad", + "cos_double_grad", + "cos_grad", + "cumprod_grad", + "cumsum_grad", + "dropout_grad", + "erf_grad", + "exp_grad", + "expand_grad", + "floor_grad", + "gather_grad", + "gather_nd_grad", + "gelu_grad", + "group_norm_grad", + "instance_norm_grad", + "layer_norm_grad", + "leaky_relu_grad", + "log_grad", + "max_grad", + "pad_grad", + "pow_double_grad", + "pow_grad", + "prod_grad", + "relu_grad", + "roll_grad", + "rsqrt_grad", + "scatter_grad", + "scatter_nd_add_grad", + "sigmoid_grad", + "silu_grad", + "sin_double_grad", + "sin_grad", + "slice_grad", + "split_grad", + "split_grad", + "sqrt_grad", + "stack_grad", + "sum_grad", + "tanh_double_grad", + "tanh_grad", + "topk_grad", + "transpose_grad", + "add_double_grad", + "add_grad", + "assign_grad", + "batch_norm_grad", + "divide_grad", + "elementwise_pow_grad", + "maximum_grad", + "min_grad", + "minimum_grad", + "multiply_grad", + "subtract_grad", + "tile_grad", + ] + """Enable running program in primitive C++ API in eager/static mode.""" from paddle.framework import ( core, ) core.set_prim_eager_enabled(enable) - core._set_prim_all_enabled(enable) + if enable: + 
paddle.framework.core._set_prim_backward_blacklist(*EAGER_COMP_OP_BLACK_LIST) log = logging.getLogger(__name__) log.info(f"{'Enable' if enable else 'Disable'} prim in eager and static mode.") From 701926adaa5c4f49aa7358157255bd11e2de8d7c Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 25 Nov 2024 21:22:36 +0800 Subject: [PATCH 42/58] update dpa2 code --- deepmd/pd/model/descriptor/__init__.py | 8 + deepmd/pd/model/descriptor/dpa2.py | 858 ++++++++++ deepmd/pd/model/descriptor/repformer_layer.py | 1481 +++++++++++++++++ deepmd/pd/model/descriptor/repformers.py | 580 +++++++ deepmd/pd/model/descriptor/se_t_tebd.py | 932 +++++++++++ deepmd/pd/utils/spin.py | 30 + 6 files changed, 3889 insertions(+) create mode 100644 deepmd/pd/model/descriptor/dpa2.py create mode 100644 deepmd/pd/model/descriptor/repformer_layer.py create mode 100644 deepmd/pd/model/descriptor/repformers.py create mode 100644 deepmd/pd/model/descriptor/se_t_tebd.py create mode 100644 deepmd/pd/utils/spin.py diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 7eaa0df85b..8935371c01 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -9,9 +9,15 @@ DescrptBlockSeAtten, DescrptDPA1, ) +from .dpa2 import ( + DescrptDPA2, +) from .env_mat import ( prod_env_mat, ) +from .repformers import ( + DescrptBlockRepformers, +) from .se_a import ( DescrptBlockSeA, DescrptSeA, @@ -23,6 +29,8 @@ "DescrptBlockSeA", "DescrptBlockSeAtten", "DescrptDPA1", + "DescrptDPA2", "DescrptSeA", "prod_env_mat", + "DescrptBlockRepformers", ] diff --git a/deepmd/pd/model/descriptor/dpa2.py b/deepmd/pd/model/descriptor/dpa2.py new file mode 100644 index 0000000000..8fbffe2d90 --- /dev/null +++ b/deepmd/pd/model/descriptor/dpa2.py @@ -0,0 +1,858 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.descriptor.dpa2 import ( 
+ RepformerArgs, + RepinitArgs, +) +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.network.mlp import ( + Identity, + MLPLayer, + NetworkCollection, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, + TypeEmbedNetConsistent, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.nlist import ( + build_multiple_neighbor_list, + get_multiple_nlist_key, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + extend_descrpt_stat, +) +from .repformer_layer import ( + RepformerLayer, +) +from .repformers import ( + DescrptBlockRepformers, +) +from .se_atten import ( + DescrptBlockSeAtten, +) +from .se_t_tebd import ( + DescrptBlockSeTTebd, +) + + +@BaseDescriptor.register("dpa2") +class DescrptDPA2(BaseDescriptor, paddle.nn.Layer): + def __init__( + self, + ntypes: int, + # args for repinit + repinit: Union[RepinitArgs, dict], + # args for repformer + repformer: Union[RepformerArgs, dict], + # kwargs for descriptor + concat_output_tebd: bool = True, + precision: str = "float64", + smooth: bool = True, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + add_tebd_to_repinit_out: bool = False, + use_econf_tebd: bool = False, + use_tebd_bias: bool = False, + type_map: Optional[list[str]] = None, + ): + r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. 
+ + Parameters + ---------- + repinit : Union[RepinitArgs, dict] + The arguments used to initialize the repinit block, see docstr in `RepinitArgs` for details information. + repformer : Union[RepformerArgs, dict] + The arguments used to initialize the repformer block, see docstr in `RepformerArgs` for details information. + concat_output_tebd : bool, optional + Whether to concat type embedding at the output of the descriptor. + precision : str, optional + The precision of the embedding net parameters. + smooth : bool, optional + Whether to use smoothness in processes such as attention weights calculation. + exclude_types : list[list[int]], optional + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + env_protection : float, optional + Protection parameter to prevent division by zero errors during environment matrix calculations. + For example, when using paddings, there may be zero distances of neighbors, which may make division by zero error during environment matrix calculations without protection. + trainable : bool, optional + If the parameters are trainable. + seed : int, optional + Random seed for parameter initialization. + add_tebd_to_repinit_out : bool, optional + Whether to add type embedding to the output representation from repinit before inputting it into repformer. + use_econf_tebd : bool, Optional + Whether to use electronic configuration type embedding. + use_tebd_bias : bool, Optional + Whether to use bias in the type embedding layer. + type_map : list[str], Optional + A list of strings. Give the name to each type of atoms. + + Returns + ------- + descriptor: paddle.Tensor + the descriptor of shape nb x nloc x g1_dim. + invariant single-atom representation. + g2: paddle.Tensor + invariant pair-atom representation. + h2: paddle.Tensor + equivariant pair-atom representation. 
+ rot_mat: paddle.Tensor + rotation matrix for equivariant fittings + sw: paddle.Tensor + The switch function for decaying inverse distance. + + """ + super().__init__() + + def init_subclass_params(sub_data, sub_class): + if isinstance(sub_data, dict): + return sub_class(**sub_data) + elif isinstance(sub_data, sub_class): + return sub_data + else: + raise ValueError( + f"Input args must be a {sub_class.__name__} class or a dict!" + ) + + self.repinit_args = init_subclass_params(repinit, RepinitArgs) + self.repformer_args = init_subclass_params(repformer, RepformerArgs) + + self.repinit = DescrptBlockSeAtten( + self.repinit_args.rcut, + self.repinit_args.rcut_smth, + self.repinit_args.nsel, + ntypes, + attn_layer=0, + neuron=self.repinit_args.neuron, + axis_neuron=self.repinit_args.axis_neuron, + tebd_dim=self.repinit_args.tebd_dim, + tebd_input_mode=self.repinit_args.tebd_input_mode, + set_davg_zero=self.repinit_args.set_davg_zero, + exclude_types=exclude_types, + env_protection=env_protection, + activation_function=self.repinit_args.activation_function, + precision=precision, + resnet_dt=self.repinit_args.resnet_dt, + smooth=smooth, + type_one_side=self.repinit_args.type_one_side, + seed=child_seed(seed, 0), + ) + self.use_three_body = self.repinit_args.use_three_body + if self.use_three_body: + self.repinit_three_body = DescrptBlockSeTTebd( + self.repinit_args.three_body_rcut, + self.repinit_args.three_body_rcut_smth, + self.repinit_args.three_body_sel, + ntypes, + neuron=self.repinit_args.three_body_neuron, + tebd_dim=self.repinit_args.tebd_dim, + tebd_input_mode=self.repinit_args.tebd_input_mode, + set_davg_zero=self.repinit_args.set_davg_zero, + exclude_types=exclude_types, + env_protection=env_protection, + activation_function=self.repinit_args.activation_function, + precision=precision, + resnet_dt=self.repinit_args.resnet_dt, + smooth=smooth, + seed=child_seed(seed, 5), + ) + else: + self.repinit_three_body = None + self.repformers = 
DescrptBlockRepformers( + self.repformer_args.rcut, + self.repformer_args.rcut_smth, + self.repformer_args.nsel, + ntypes, + nlayers=self.repformer_args.nlayers, + g1_dim=self.repformer_args.g1_dim, + g2_dim=self.repformer_args.g2_dim, + axis_neuron=self.repformer_args.axis_neuron, + direct_dist=self.repformer_args.direct_dist, + update_g1_has_conv=self.repformer_args.update_g1_has_conv, + update_g1_has_drrd=self.repformer_args.update_g1_has_drrd, + update_g1_has_grrg=self.repformer_args.update_g1_has_grrg, + update_g1_has_attn=self.repformer_args.update_g1_has_attn, + update_g2_has_g1g1=self.repformer_args.update_g2_has_g1g1, + update_g2_has_attn=self.repformer_args.update_g2_has_attn, + update_h2=self.repformer_args.update_h2, + attn1_hidden=self.repformer_args.attn1_hidden, + attn1_nhead=self.repformer_args.attn1_nhead, + attn2_hidden=self.repformer_args.attn2_hidden, + attn2_nhead=self.repformer_args.attn2_nhead, + attn2_has_gate=self.repformer_args.attn2_has_gate, + activation_function=self.repformer_args.activation_function, + update_style=self.repformer_args.update_style, + update_residual=self.repformer_args.update_residual, + update_residual_init=self.repformer_args.update_residual_init, + set_davg_zero=self.repformer_args.set_davg_zero, + smooth=smooth, + exclude_types=exclude_types, + env_protection=env_protection, + precision=precision, + trainable_ln=self.repformer_args.trainable_ln, + ln_eps=self.repformer_args.ln_eps, + use_sqrt_nnei=self.repformer_args.use_sqrt_nnei, + g1_out_conv=self.repformer_args.g1_out_conv, + g1_out_mlp=self.repformer_args.g1_out_mlp, + seed=child_seed(seed, 1), + ) + self.rcsl_list = [ + (self.repformers.get_rcut(), self.repformers.get_nsel()), + (self.repinit.get_rcut(), self.repinit.get_nsel()), + ] + if self.use_three_body: + self.rcsl_list.append( + (self.repinit_three_body.get_rcut(), self.repinit_three_body.get_nsel()) + ) + self.rcsl_list.sort() + for ii in range(1, len(self.rcsl_list)): + assert ( + self.rcsl_list[ii 
- 1][1] <= self.rcsl_list[ii][1] + ), "rcut and sel are not in the same order" + self.rcut_list = [ii[0] for ii in self.rcsl_list] + self.nsel_list = [ii[1] for ii in self.rcsl_list] + self.use_econf_tebd = use_econf_tebd + self.use_tebd_bias = use_tebd_bias + self.type_map = type_map + self.type_embedding = TypeEmbedNet( + ntypes, + self.repinit_args.tebd_dim, + precision=precision, + seed=child_seed(seed, 2), + use_econf_tebd=self.use_econf_tebd, + use_tebd_bias=use_tebd_bias, + type_map=type_map, + ) + self.concat_output_tebd = concat_output_tebd + self.precision = precision + self.smooth = smooth + self.exclude_types = exclude_types + self.env_protection = env_protection + self.trainable = trainable + self.add_tebd_to_repinit_out = add_tebd_to_repinit_out + + self.repinit_out_dim = self.repinit.dim_out + if self.repinit_args.use_three_body: + assert self.repinit_three_body is not None + self.repinit_out_dim += self.repinit_three_body.dim_out + + if self.repinit_out_dim == self.repformers.dim_in: + self.g1_shape_tranform = Identity() + else: + self.g1_shape_tranform = MLPLayer( + self.repinit_out_dim, + self.repformers.dim_in, + bias=False, + precision=precision, + init="glorot", + seed=child_seed(seed, 3), + ) + self.tebd_transform = None + if self.add_tebd_to_repinit_out: + self.tebd_transform = MLPLayer( + self.repinit_args.tebd_dim, + self.repformers.dim_in, + bias=False, + precision=precision, + seed=child_seed(seed, 4), + ) + assert self.repinit.rcut > self.repformers.rcut + assert self.repinit.sel[0] > self.repformers.sel[0] + + self.tebd_dim = self.repinit_args.tebd_dim + self.rcut = self.repinit.get_rcut() + self.rcut_smth = self.repinit.get_rcut_smth() + self.ntypes = ntypes + self.sel = self.repinit.sel + # set trainable + for param in self.parameters(): + param.stop_gradient = not trainable + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the 
neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension of this descriptor.""" + ret = self.repformers.dim_out + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + def get_dim_emb(self) -> int: + """Returns the embedding dimension of this descriptor.""" + return self.repformers.dim_emb + + def mixed_types(self) -> bool: + """If true, the discriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the discriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return any( + [self.repinit.has_message_passing(), self.repformers.has_message_passing()] + ) + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + # the env_protection of repinit is the same as that of the repformer + return self.repinit.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. 
+ If not start from checkpoint (resume is False), + some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For DPA2 descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in type_embedding, repinit and repformers + if shared_level == 0: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.repinit.share_params(base_class.repinit, 0, resume=resume) + self._sub_layers["g1_shape_tranform"] = base_class._sub_layers[ + "g1_shape_tranform" + ] + self.repformers.share_params(base_class.repformers, 0, resume=resume) + # shared_level: 1 + # share all parameters in type_embedding and repinit + elif shared_level == 1: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.repinit.share_params(base_class.repinit, 0, resume=resume) + # shared_level: 2 + # share all parameters in type_embedding and repformers + elif shared_level == 2: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self._sub_layers["g1_shape_tranform"] = base_class._sub_layers[ + "g1_shape_tranform" + ] + self.repformers.share_params(base_class.repformers, 0, resume=resume) + # shared_level: 3 + # share all parameters in type_embedding + elif shared_level == 3: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + # Other shared levels + else: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + self.exclude_types = map_pair_exclude_types(self.exclude_types, remap_index) + self.ntypes = len(type_map) + repinit = self.repinit + repformers = self.repformers + repinit_three_body = self.repinit_three_body + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + repinit, + type_map, + des_with_stat=model_with_new_type_stat.repinit + if model_with_new_type_stat is not None + else None, + ) + extend_descrpt_stat( + repformers, + type_map, + des_with_stat=model_with_new_type_stat.repformers + if model_with_new_type_stat is not None + else None, + ) + if self.use_three_body: + extend_descrpt_stat( + repinit_three_body, + type_map, + des_with_stat=model_with_new_type_stat.repinit_three_body + if model_with_new_type_stat is not None + else None, + ) + repinit.ntypes = self.ntypes + repformers.ntypes = self.ntypes + repinit.reinit_exclude(self.exclude_types) + repformers.reinit_exclude(self.exclude_types) + repinit["davg"] = repinit["davg"][remap_index] + repinit["dstd"] = repinit["dstd"][remap_index] + repformers["davg"] = repformers["davg"][remap_index] + repformers["dstd"] = repformers["dstd"][remap_index] + if self.use_three_body: + repinit_three_body.ntypes = self.ntypes + repinit_three_body.reinit_exclude(self.exclude_types) + repinit_three_body["davg"] = repinit_three_body["davg"][remap_index] + repinit_three_body["dstd"] = repinit_three_body["dstd"][remap_index] + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + """Returns the embedding dimension g2.""" + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = 
None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. + + """ + descrpt_list = [self.repinit, self.repformers] + if self.use_three_body: + descrpt_list.append(self.repinit_three_body) + for ii, descrpt in enumerate(descrpt_list): + descrpt.compute_input_stats(merged, path) + + def set_stat_mean_and_stddev( + self, + mean: list[paddle.Tensor], + stddev: list[paddle.Tensor], + ) -> None: + """Update mean and stddev for descriptor.""" + descrpt_list = [self.repinit, self.repformers] + if self.use_three_body: + descrpt_list.append(self.repinit_three_body) + for ii, descrpt in enumerate(descrpt_list): + descrpt.mean = mean[ii] + descrpt.stddev = stddev[ii] + + def get_stat_mean_and_stddev( + self, + ) -> tuple[list[paddle.Tensor], list[paddle.Tensor]]: + """Get mean and stddev for descriptor.""" + mean_list = [self.repinit.mean, self.repformers.mean] + stddev_list = [ + self.repinit.stddev, + self.repformers.stddev, + ] + if self.use_three_body: + mean_list.append(self.repinit_three_body.mean) + stddev_list.append(self.repinit_three_body.stddev) + return mean_list, stddev_list + + def serialize(self) -> dict: + repinit = self.repinit + repformers = self.repformers + repinit_three_body = self.repinit_three_body + data = { + "@class": "Descriptor", + "type": "dpa2", + "@version": 3, + "ntypes": self.ntypes, + "repinit_args": self.repinit_args.serialize(), + "repformer_args": 
self.repformer_args.serialize(), + "concat_output_tebd": self.concat_output_tebd, + "precision": self.precision, + "smooth": self.smooth, + "exclude_types": self.exclude_types, + "env_protection": self.env_protection, + "trainable": self.trainable, + "add_tebd_to_repinit_out": self.add_tebd_to_repinit_out, + "use_econf_tebd": self.use_econf_tebd, + "use_tebd_bias": self.use_tebd_bias, + "type_map": self.type_map, + "type_embedding": self.type_embedding.embedding.serialize(), + "g1_shape_tranform": self.g1_shape_tranform.serialize(), + } + if self.add_tebd_to_repinit_out: + data.update( + { + "tebd_transform": self.tebd_transform.serialize(), + } + ) + repinit_variable = { + "embeddings": repinit.filter_layers.serialize(), + "env_mat": DPEnvMat(repinit.rcut, repinit.rcut_smth).serialize(), + "@variables": { + "davg": to_numpy_array(repinit["davg"]), + "dstd": to_numpy_array(repinit["dstd"]), + }, + } + if repinit.tebd_input_mode in ["strip"]: + repinit_variable.update( + {"embeddings_strip": repinit.filter_layers_strip.serialize()} + ) + repformers_variable = { + "g2_embd": repformers.g2_embd.serialize(), + "repformer_layers": [layer.serialize() for layer in repformers.layers], + "env_mat": DPEnvMat(repformers.rcut, repformers.rcut_smth).serialize(), + "@variables": { + "davg": to_numpy_array(repformers["davg"]), + "dstd": to_numpy_array(repformers["dstd"]), + }, + } + data.update( + { + "repinit_variable": repinit_variable, + "repformers_variable": repformers_variable, + } + ) + if self.use_three_body: + repinit_three_body_variable = { + "embeddings": repinit_three_body.filter_layers.serialize(), + "env_mat": DPEnvMat( + repinit_three_body.rcut, repinit_three_body.rcut_smth + ).serialize(), + "@variables": { + "davg": to_numpy_array(repinit_three_body["davg"]), + "dstd": to_numpy_array(repinit_three_body["dstd"]), + }, + } + if repinit_three_body.tebd_input_mode in ["strip"]: + repinit_three_body_variable.update( + { + "embeddings_strip": 
repinit_three_body.filter_layers_strip.serialize() + } + ) + data.update( + { + "repinit_three_body_variable": repinit_three_body_variable, + } + ) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptDPA2": + data = data.copy() + version = data.pop("@version") + check_version_compatibility(version, 3, 1) + data.pop("@class") + data.pop("type") + repinit_variable = data.pop("repinit_variable").copy() + repformers_variable = data.pop("repformers_variable").copy() + repinit_three_body_variable = ( + data.pop("repinit_three_body_variable").copy() + if "repinit_three_body_variable" in data + else None + ) + type_embedding = data.pop("type_embedding") + g1_shape_tranform = data.pop("g1_shape_tranform") + tebd_transform = data.pop("tebd_transform", None) + add_tebd_to_repinit_out = data["add_tebd_to_repinit_out"] + if version < 3: + # compat with old version + data["repformer_args"]["use_sqrt_nnei"] = False + data["repformer_args"]["g1_out_conv"] = False + data["repformer_args"]["g1_out_mlp"] = False + data["repinit"] = RepinitArgs(**data.pop("repinit_args")) + data["repformer"] = RepformerArgs(**data.pop("repformer_args")) + # compat with version 1 + if "use_tebd_bias" not in data: + data["use_tebd_bias"] = True + obj = cls(**data) + obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize( + type_embedding + ) + if add_tebd_to_repinit_out: + assert isinstance(tebd_transform, dict) + obj.tebd_transform = MLPLayer.deserialize(tebd_transform) + if obj.repinit.dim_out != obj.repformers.dim_in: + obj.g1_shape_tranform = MLPLayer.deserialize(g1_shape_tranform) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.repinit.prec, place=env.DEVICE) + + # deserialize repinit + statistic_repinit = repinit_variable.pop("@variables") + env_mat = repinit_variable.pop("env_mat") + tebd_input_mode = data["repinit"].tebd_input_mode + obj.repinit.filter_layers = NetworkCollection.deserialize( + repinit_variable.pop("embeddings") + ) + if 
tebd_input_mode in ["strip"]: + obj.repinit.filter_layers_strip = NetworkCollection.deserialize( + repinit_variable.pop("embeddings_strip") + ) + obj.repinit["davg"] = t_cvt(statistic_repinit["davg"]) + obj.repinit["dstd"] = t_cvt(statistic_repinit["dstd"]) + + if data["repinit"].use_three_body: + # deserialize repinit_three_body + statistic_repinit_three_body = repinit_three_body_variable.pop("@variables") + env_mat = repinit_three_body_variable.pop("env_mat") + tebd_input_mode = data["repinit"].tebd_input_mode + obj.repinit_three_body.filter_layers = NetworkCollection.deserialize( + repinit_three_body_variable.pop("embeddings") + ) + if tebd_input_mode in ["strip"]: + obj.repinit_three_body.filter_layers_strip = ( + NetworkCollection.deserialize( + repinit_three_body_variable.pop("embeddings_strip") + ) + ) + obj.repinit_three_body["davg"] = t_cvt(statistic_repinit_three_body["davg"]) + obj.repinit_three_body["dstd"] = t_cvt(statistic_repinit_three_body["dstd"]) + + # deserialize repformers + statistic_repformers = repformers_variable.pop("@variables") + env_mat = repformers_variable.pop("env_mat") + repformer_layers = repformers_variable.pop("repformer_layers") + obj.repformers.g2_embd = MLPLayer.deserialize( + repformers_variable.pop("g2_embd") + ) + obj.repformers["davg"] = t_cvt(statistic_repformers["davg"]) + obj.repformers["dstd"] = t_cvt(statistic_repformers["dstd"]) + obj.repformers.layers = paddle.nn.LayerList( + [RepformerLayer.deserialize(layer) for layer in repformer_layers] + ) + return obj + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + extended_coord + The extended coordinates of atoms. shape: nf x (nallx3) + extended_atype + The extended aotm types. shape: nf x nall + nlist + The neighbor list. 
shape: nf x nloc x nnei + mapping + The index mapping, mapps extended region index to local region. + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + use_three_body = self.use_three_body + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + # nlists + nlist_dict = build_multiple_neighbor_list( + extended_coord, + nlist, + self.rcut_list, + self.nsel_list, + ) + # repinit + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + g1, _, _, _, _ = self.repinit( + nlist_dict[ + get_multiple_nlist_key(self.repinit.get_rcut(), self.repinit.get_nsel()) + ], + extended_coord, + extended_atype, + g1_ext, + mapping, + ) + if use_three_body: + assert self.repinit_three_body is not None + g1_three_body, __, __, __, __ = self.repinit_three_body( + nlist_dict[ + get_multiple_nlist_key( + self.repinit_three_body.get_rcut(), + self.repinit_three_body.get_nsel(), + ) + ], + extended_coord, + extended_atype, + g1_ext, + mapping, + ) + g1 = paddle.concat([g1, g1_three_body], axis=-1) + # linear to change shape + g1 = self.g1_shape_tranform(g1) + if self.add_tebd_to_repinit_out: + assert self.tebd_transform is not None + g1 = g1 + self.tebd_transform(g1_inp) + # mapping g1 + if comm_dict is None: + assert mapping is not None + mapping_ext = ( + mapping.reshape([nframes, nall]) + .unsqueeze(-1) + .expand([-1, -1, g1.shape[-1]]) + ) + g1_ext = decomp.take_along_axis(g1, mapping_ext, 1) + g1 = g1_ext + # repformer + g1, g2, h2, 
rot_mat, sw = self.repformers( + nlist_dict[ + get_multiple_nlist_key( + self.repformers.get_rcut(), self.repformers.get_nsel() + ) + ], + extended_coord, + extended_atype, + g1, + mapping, + comm_dict, + ) + if self.concat_output_tebd: + g1 = paddle.concat([g1, g1_inp], axis=-1) + return g1, rot_mat, g2, h2, sw + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statictics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + update_sel = UpdateSel() + min_nbor_dist, repinit_sel = update_sel.update_one_sel( + train_data, + type_map, + local_jdata_cpy["repinit"]["rcut"], + local_jdata_cpy["repinit"]["nsel"], + True, + ) + local_jdata_cpy["repinit"]["nsel"] = repinit_sel[0] + min_nbor_dist, repformer_sel = update_sel.update_one_sel( + train_data, + type_map, + local_jdata_cpy["repformer"]["rcut"], + local_jdata_cpy["repformer"]["nsel"], + True, + ) + local_jdata_cpy["repformer"]["nsel"] = repformer_sel[0] + return local_jdata_cpy, min_nbor_dist diff --git a/deepmd/pd/model/descriptor/repformer_layer.py b/deepmd/pd/model/descriptor/repformer_layer.py new file mode 100644 index 0000000000..816e16b05d --- /dev/null +++ b/deepmd/pd/model/descriptor/repformer_layer.py @@ -0,0 +1,1481 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import paddle +import paddle.nn as nn + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.network.init import ( + constant_, + normal_, +) +from deepmd.pd.model.network.layernorm import ( 
+ LayerNorm, +) +from deepmd.pd.model.network.mlp import ( + MLPLayer, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) +from deepmd.pd.utils.utils import ( + ActivationFn, + get_generator, + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + + +def get_residual( + _dim: int, + _scale: float, + _mode: str = "norm", + trainable: bool = True, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, +) -> paddle.Tensor: + r""" + Get residual tensor for one update vector. + + Parameters + ---------- + _dim : int + The dimension of the update vector. + _scale + The initial scale of the residual tensor. See `_mode` for details. + _mode + The mode of residual initialization for the residual tensor. + - "norm" (default): init residual using normal with `_scale` std. + - "const": init residual using element-wise constants of `_scale`. + trainable + Whether the residual tensor is trainable. + precision + The precision of the residual tensor. + seed : int, optional + Random seed for parameter initialization. + """ + random_generator = get_generator(seed) + residual = paddle.create_parameter( + [_dim], + dtype=PRECISION_DICT[precision], + default_initializer=nn.initializer.Constant(0), + ).to(device=env.DEVICE) + residual.stop_gradient = not trainable + if _mode == "norm": + normal_(residual.data, std=_scale, generator=random_generator) + elif _mode == "const": + constant_(residual.data, val=_scale) + else: + raise RuntimeError(f"Unsupported initialization mode '{_mode}'!") + return residual + + +# common ops +def _make_nei_g1( + g1_ext: paddle.Tensor, + nlist: paddle.Tensor, +) -> paddle.Tensor: + """ + Make neighbor-wise atomic invariant rep. + + Parameters + ---------- + g1_ext + Extended atomic invariant rep, with shape nb x nall x ng1. + nlist + Neighbor list, with shape nb x nloc x nnei. 
+ + Returns + ------- + gg1: paddle.Tensor + Neighbor-wise atomic invariant rep, with shape nb x nloc x nnei x ng1. + + """ + # nlist: nb x nloc x nnei + nb, nloc, nnei = nlist.shape + # g1_ext: nb x nall x ng1 + ng1 = g1_ext.shape[-1] + # index: nb x (nloc x nnei) x ng1 + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, ng1]) + # gg1 : nb x (nloc x nnei) x ng1 + gg1 = decomp.take_along_axis(g1_ext, axis=1, indices=index) + # gg1 : nb x nloc x nnei x ng1 + gg1 = gg1.reshape([nb, nloc, nnei, ng1]) + return gg1 + + +def _apply_nlist_mask( + gg: paddle.Tensor, + nlist_mask: paddle.Tensor, +) -> paddle.Tensor: + """ + Apply nlist mask to neighbor-wise rep tensors. + + Parameters + ---------- + gg + Neighbor-wise rep tensors, with shape nf x nloc x nnei x d. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nf x nloc x nnei. + """ + # gg: nf x nloc x nnei x d + # msk: nf x nloc x nnei + return gg.masked_fill(~nlist_mask.unsqueeze(-1), 0.0) + + +def _apply_switch(gg: paddle.Tensor, sw: paddle.Tensor) -> paddle.Tensor: + """ + Apply switch function to neighbor-wise rep tensors. + + Parameters + ---------- + gg + Neighbor-wise rep tensors, with shape nf x nloc x nnei x d. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nf x nloc x nnei. 
+ """ + # gg: nf x nloc x nnei x d + # sw: nf x nloc x nnei + return gg * sw.unsqueeze(-1) + + +class Atten2Map(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + hidden_dim: int, + head_num: int, + has_gate: bool = False, # apply gate to attn map + smooth: bool = True, + attnw_shift: float = 20.0, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ): + """Return neighbor-wise multi-head self-attention maps, with gate mechanism.""" + super().__init__() + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.head_num = head_num + self.mapqk = MLPLayer( + input_dim, + hidden_dim * 2 * head_num, + bias=False, + precision=precision, + seed=seed, + ) + self.has_gate = has_gate + self.smooth = smooth + self.attnw_shift = attnw_shift + self.precision = precision + + def forward( + self, + g2: paddle.Tensor, # nb x nloc x nnei x ng2 + h2: paddle.Tensor, # nb x nloc x nnei x 3 + nlist_mask: paddle.Tensor, # nb x nloc x nnei + sw: paddle.Tensor, # nb x nloc x nnei + ) -> paddle.Tensor: + ( + nb, + nloc, + nnei, + _, + ) = g2.shape + nd, nh = self.hidden_dim, self.head_num + # nb x nloc x nnei x nd x (nh x 2) + g2qk = self.mapqk(g2).reshape([nb, nloc, nnei, nd, nh * 2]) + # nb x nloc x (nh x 2) x nnei x nd + g2qk = paddle.transpose(g2qk, (0, 1, 4, 2, 3)) + # nb x nloc x nh x nnei x nd + g2q, g2k = paddle.split(g2qk, decomp.sec(g2qk.shape[2], nh), axis=2) + # g2q = paddle.nn.functional.normalize(g2q, axis=-1) + # g2k = paddle.nn.functional.normalize(g2k, axis=-1) + # nb x nloc x nh x nnei x nnei + attnw = paddle.matmul(g2q, paddle.transpose(g2k, [0, 1, 2, 4, 3])) / nd**0.5 + if self.has_gate: + gate = paddle.matmul(h2, paddle.transpose(h2, [0, 1, 3, 2])).unsqueeze(-3) + attnw = attnw * gate + # mask the attenmap, nb x nloc x 1 x 1 x nnei + attnw_mask = ~nlist_mask.unsqueeze(2).unsqueeze(2) + # mask the attenmap, nb x nloc x 1 x nnei x 1 + attnw_mask_c = ~nlist_mask.unsqueeze(2).unsqueeze(-1) + if self.smooth: + attnw = (attnw 
+ self.attnw_shift) * sw[:, :, None, :, None] * sw[ + :, :, None, None, : + ] - self.attnw_shift + else: + attnw = attnw.masked_fill( + attnw_mask, + float("-inf"), + ) + attnw = paddle.nn.functional.softmax(attnw, axis=-1) + attnw = attnw.masked_fill( + attnw_mask, + 0.0, + ) + # nb x nloc x nh x nnei x nnei + attnw = attnw.masked_fill( + attnw_mask_c, + 0.0, + ) + if self.smooth: + attnw = attnw * sw[:, :, None, :, None] * sw[:, :, None, None, :] + # nb x nloc x nnei x nnei + h2h2t = paddle.matmul(h2, paddle.transpose(h2, [0, 1, 3, 2])) / 3.0**0.5 + # nb x nloc x nh x nnei x nnei + ret = attnw * h2h2t[:, :, None, :, :] + # ret = paddle.nn.functional.softmax(g2qk, axis=-1) + # nb x nloc x nnei x nnei x nh + ret = paddle.transpose(ret, (0, 1, 3, 4, 2)) + return ret + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "Atten2Map", + "@version": 1, + "input_dim": self.input_dim, + "hidden_dim": self.hidden_dim, + "head_num": self.head_num, + "has_gate": self.has_gate, + "smooth": self.smooth, + "attnw_shift": self.attnw_shift, + "precision": self.precision, + "mapqk": self.mapqk.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "Atten2Map": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + mapqk = data.pop("mapqk") + obj = cls(**data) + obj.mapqk = MLPLayer.deserialize(mapqk) + return obj + + +class Atten2MultiHeadApply(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + head_num: int, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.input_dim = input_dim + self.head_num = head_num + self.mapv = MLPLayer( + input_dim, + input_dim * head_num, + bias=False, + precision=precision, + seed=child_seed(seed, 0), + ) + self.head_map = MLPLayer( + input_dim * head_num, + input_dim, + precision=precision, + seed=child_seed(seed, 1), + ) + self.precision = precision + + def forward( + self, + AA: paddle.Tensor, # nf x nloc x nnei x nnei x nh + g2: paddle.Tensor, # nf x nloc x nnei x ng2 + ) -> paddle.Tensor: + nf, nloc, nnei, ng2 = g2.shape + nh = self.head_num + # nf x nloc x nnei x ng2 x nh + g2v = self.mapv(g2).reshape([nf, nloc, nnei, ng2, nh]) + # nf x nloc x nh x nnei x ng2 + g2v = paddle.transpose(g2v, (0, 1, 4, 2, 3)) + # g2v = paddle.nn.functional.normalize(g2v, axis=-1) + # nf x nloc x nh x nnei x nnei + AA = paddle.transpose(AA, (0, 1, 4, 2, 3)) + # nf x nloc x nh x nnei x ng2 + ret = paddle.matmul(AA, g2v) + # nf x nloc x nnei x ng2 x nh + ret = paddle.transpose(ret, (0, 1, 3, 4, 2)).reshape( + [nf, nloc, nnei, (ng2 * nh)] + ) + # nf x nloc x nnei x ng2 + return self.head_map(ret) + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "Atten2MultiHeadApply", + "@version": 1, + "input_dim": self.input_dim, + "head_num": self.head_num, + "precision": self.precision, + "mapv": self.mapv.serialize(), + "head_map": self.head_map.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "Atten2MultiHeadApply": + """Deserialize the networks from a dict. 
+ + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + mapv = data.pop("mapv") + head_map = data.pop("head_map") + obj = cls(**data) + obj.mapv = MLPLayer.deserialize(mapv) + obj.head_map = MLPLayer.deserialize(head_map) + return obj + + +class Atten2EquiVarApply(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + head_num: int, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.input_dim = input_dim + self.head_num = head_num + self.head_map = MLPLayer( + head_num, 1, bias=False, precision=precision, seed=seed + ) + self.precision = precision + + def forward( + self, + AA: paddle.Tensor, # nf x nloc x nnei x nnei x nh + h2: paddle.Tensor, # nf x nloc x nnei x 3 + ) -> paddle.Tensor: + nf, nloc, nnei, _ = h2.shape + nh = self.head_num + # nf x nloc x nh x nnei x nnei + AA = paddle.transpose(AA, (0, 1, 4, 2, 3)) + h2m = paddle.unsqueeze(h2, axis=2) + # nf x nloc x nh x nnei x 3 + h2m = paddle.tile(h2m, [1, 1, nh, 1, 1]) + # nf x nloc x nh x nnei x 3 + ret = paddle.matmul(AA, h2m) + # nf x nloc x nnei x 3 x nh + ret = paddle.transpose(ret, (0, 1, 3, 4, 2)).reshape([nf, nloc, nnei, 3, nh]) + # nf x nloc x nnei x 3 + return paddle.squeeze(self.head_map(ret), axis=-1) + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "Atten2EquiVarApply", + "@version": 1, + "input_dim": self.input_dim, + "head_num": self.head_num, + "precision": self.precision, + "head_map": self.head_map.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "Atten2EquiVarApply": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + head_map = data.pop("head_map") + obj = cls(**data) + obj.head_map = MLPLayer.deserialize(head_map) + return obj + + +class LocalAtten(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + hidden_dim: int, + head_num: int, + smooth: bool = True, + attnw_shift: float = 20.0, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.head_num = head_num + self.mapq = MLPLayer( + input_dim, + hidden_dim * 1 * head_num, + bias=False, + precision=precision, + seed=child_seed(seed, 0), + ) + self.mapkv = MLPLayer( + input_dim, + (hidden_dim + input_dim) * head_num, + bias=False, + precision=precision, + seed=child_seed(seed, 1), + ) + self.head_map = MLPLayer( + input_dim * head_num, + input_dim, + precision=precision, + seed=child_seed(seed, 2), + ) + self.smooth = smooth + self.attnw_shift = attnw_shift + self.precision = precision + + def forward( + self, + g1: paddle.Tensor, # nb x nloc x ng1 + gg1: paddle.Tensor, # nb x nloc x nnei x ng1 + nlist_mask: paddle.Tensor, # nb x nloc x nnei + sw: paddle.Tensor, # nb x nloc x nnei + ) -> paddle.Tensor: + nb, nloc, nnei = nlist_mask.shape + ni, nd, nh = self.input_dim, self.hidden_dim, self.head_num + assert ni == g1.shape[-1] + assert ni == gg1.shape[-1] + # nb x nloc x nd x nh + g1q = self.mapq(g1).reshape([nb, nloc, nd, nh]) + # nb x nloc x nh x nd + g1q = paddle.transpose(g1q, (0, 1, 3, 2)) + # nb x nloc x nnei x (nd+ni) x nh + gg1kv = self.mapkv(gg1).reshape([nb, nloc, nnei, nd + ni, nh]) + gg1kv = paddle.transpose(gg1kv, (0, 1, 4, 2, 3)) + # nb x nloc x nh x nnei x nd, nb x nloc x nh x nnei x ng1 + gg1k, gg1v = paddle.split(gg1kv, [nd, ni], axis=-1) + + # nb x nloc x nh x 1 x nnei + attnw = ( + paddle.matmul(g1q.unsqueeze(-2), paddle.transpose(gg1k, [0, 1, 2, 4, 3])) + / nd**0.5 + ) + # 
nb x nloc x nh x nnei + attnw = attnw.squeeze(-2) + # mask the attenmap, nb x nloc x 1 x nnei + attnw_mask = ~nlist_mask.unsqueeze(-2) + # nb x nloc x nh x nnei + if self.smooth: + attnw = (attnw + self.attnw_shift) * sw.unsqueeze(-2) - self.attnw_shift + else: + attnw = attnw.masked_fill( + attnw_mask, + float("-inf"), + ) + attnw = paddle.nn.functional.softmax(attnw, axis=-1) + attnw = attnw.masked_fill( + attnw_mask, + 0.0, + ) + if self.smooth: + attnw = attnw * sw.unsqueeze(-2) + + # nb x nloc x nh x ng1 + ret = ( + paddle.matmul(attnw.unsqueeze(-2), gg1v) + .squeeze(-2) + .reshape([nb, nloc, nh * ni]) + ) + # nb x nloc x ng1 + ret = self.head_map(ret) + return ret + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "LocalAtten", + "@version": 1, + "input_dim": self.input_dim, + "hidden_dim": self.hidden_dim, + "head_num": self.head_num, + "smooth": self.smooth, + "attnw_shift": self.attnw_shift, + "precision": self.precision, + "mapq": self.mapq.serialize(), + "mapkv": self.mapkv.serialize(), + "head_map": self.head_map.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "LocalAtten": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + mapq = data.pop("mapq") + mapkv = data.pop("mapkv") + head_map = data.pop("head_map") + obj = cls(**data) + obj.mapq = MLPLayer.deserialize(mapq) + obj.mapkv = MLPLayer.deserialize(mapkv) + obj.head_map = MLPLayer.deserialize(head_map) + return obj + + +class RepformerLayer(paddle.nn.Layer): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + g1_dim=128, + g2_dim=16, + axis_neuron: int = 4, + update_chnnl_2: bool = True, + update_g1_has_conv: bool = True, + update_g1_has_drrd: bool = True, + update_g1_has_grrg: bool = True, + update_g1_has_attn: bool = True, + update_g2_has_g1g1: bool = True, + update_g2_has_attn: bool = True, + update_h2: bool = False, + attn1_hidden: int = 64, + attn1_nhead: int = 4, + attn2_hidden: int = 16, + attn2_nhead: int = 4, + attn2_has_gate: bool = False, + activation_function: str = "tanh", + update_style: str = "res_avg", + update_residual: float = 0.001, + update_residual_init: str = "norm", + smooth: bool = True, + precision: str = "float64", + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + use_sqrt_nnei: bool = True, + g1_out_conv: bool = True, + g1_out_mlp: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.epsilon = 1e-4 # protection of 1./nnei + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.ntypes = ntypes + sel = [sel] if isinstance(sel, int) else sel + self.nnei = sum(sel) + assert len(sel) == 1 + self.sel = sel + self.sec = self.sel + self.axis_neuron = axis_neuron + self.activation_function = activation_function + self.act = ActivationFn(activation_function) + self.update_g1_has_grrg = update_g1_has_grrg + self.update_g1_has_drrd = update_g1_has_drrd + self.update_g1_has_conv = update_g1_has_conv + self.update_g1_has_attn = update_g1_has_attn + self.update_chnnl_2 = update_chnnl_2 + self.update_g2_has_g1g1 = 
update_g2_has_g1g1 if self.update_chnnl_2 else False + self.update_g2_has_attn = update_g2_has_attn if self.update_chnnl_2 else False + self.update_h2 = update_h2 if self.update_chnnl_2 else False + del update_g2_has_g1g1, update_g2_has_attn, update_h2 + self.attn1_hidden = attn1_hidden + self.attn1_nhead = attn1_nhead + self.attn2_hidden = attn2_hidden + self.attn2_nhead = attn2_nhead + self.attn2_has_gate = attn2_has_gate + self.update_style = update_style + self.update_residual = update_residual + self.update_residual_init = update_residual_init + self.smooth = smooth + self.g1_dim = g1_dim + self.g2_dim = g2_dim + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.precision = precision + self.seed = seed + self.use_sqrt_nnei = use_sqrt_nnei + self.g1_out_conv = g1_out_conv + self.g1_out_mlp = g1_out_mlp + + assert update_residual_init in [ + "norm", + "const", + ], "'update_residual_init' only support 'norm' or 'const'!" + self.update_residual = update_residual + self.update_residual_init = update_residual_init + self.g1_residual = [] + self.g2_residual = [] + self.h2_residual = [] + + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 0), + ) + ) + + g1_in_dim = self.cal_1_dim(g1_dim, g2_dim, self.axis_neuron) + self.linear1 = MLPLayer( + g1_in_dim, + g1_dim, + precision=precision, + seed=child_seed(seed, 1), + ) + self.linear2 = None + self.proj_g1g2 = None + self.proj_g1g1g2 = None + self.attn2g_map = None + self.attn2_mh_apply = None + self.attn2_lm = None + self.attn2_ev_apply = None + self.loc_attn = None + + if self.update_chnnl_2: + self.linear2 = MLPLayer( + g2_dim, + g2_dim, + precision=precision, + seed=child_seed(seed, 2), + ) + if self.update_style == "res_residual": + self.g2_residual.append( + get_residual( + g2_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + 
seed=child_seed(seed, 3), + ) + ) + if self.g1_out_mlp: + self.g1_self_mlp = MLPLayer( + g1_dim, + g1_dim, + precision=precision, + seed=child_seed(seed, 15), + ) + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 16), + ) + ) + else: + self.g1_self_mlp = None + if self.update_g1_has_conv: + if not self.g1_out_conv: + self.proj_g1g2 = MLPLayer( + g1_dim, + g2_dim, + bias=False, + precision=precision, + seed=child_seed(seed, 4), + ) + else: + self.proj_g1g2 = MLPLayer( + g2_dim, + g1_dim, + bias=False, + precision=precision, + seed=child_seed(seed, 4), + ) + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 17), + ) + ) + if self.update_g2_has_g1g1: + self.proj_g1g1g2 = MLPLayer( + g1_dim, + g2_dim, + bias=False, + precision=precision, + seed=child_seed(seed, 5), + ) + if self.update_style == "res_residual": + self.g2_residual.append( + get_residual( + g2_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 6), + ) + ) + if self.update_g2_has_attn or self.update_h2: + self.attn2g_map = Atten2Map( + g2_dim, + attn2_hidden, + attn2_nhead, + attn2_has_gate, + self.smooth, + precision=precision, + seed=child_seed(seed, 7), + ) + if self.update_g2_has_attn: + self.attn2_mh_apply = Atten2MultiHeadApply( + g2_dim, attn2_nhead, precision=precision, seed=child_seed(seed, 8) + ) + self.attn2_lm = LayerNorm( + g2_dim, + eps=ln_eps, + trainable=trainable_ln, + precision=precision, + seed=child_seed(seed, 9), + ) + if self.update_style == "res_residual": + self.g2_residual.append( + get_residual( + g2_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 10), + ) + ) + + if self.update_h2: + 
self.attn2_ev_apply = Atten2EquiVarApply( + g2_dim, attn2_nhead, precision=precision, seed=child_seed(seed, 11) + ) + if self.update_style == "res_residual": + self.h2_residual.append( + get_residual( + 1, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 12), + ) + ) + if self.update_g1_has_attn: + self.loc_attn = LocalAtten( + g1_dim, + attn1_hidden, + attn1_nhead, + self.smooth, + precision=precision, + seed=child_seed(seed, 13), + ) + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 14), + ) + ) + + self.g1_residual = nn.ParameterList(self.g1_residual) + self.g2_residual = nn.ParameterList(self.g2_residual) + self.h2_residual = nn.ParameterList(self.h2_residual) + + def cal_1_dim(self, g1d: int, g2d: int, ax: int) -> int: + ret = g1d if not self.g1_out_mlp else 0 + if self.update_g1_has_grrg: + ret += g2d * ax + if self.update_g1_has_drrd: + ret += g1d * ax + if self.update_g1_has_conv and not self.g1_out_conv: + ret += g2d + return ret + + def _update_h2( + self, + h2: paddle.Tensor, + attn: paddle.Tensor, + ) -> paddle.Tensor: + """ + Calculate the attention weights update for pair-wise equivariant rep. + + Parameters + ---------- + h2 + Pair-wise equivariant rep tensors, with shape nf x nloc x nnei x 3. + attn + Attention weights from g2 attention, with shape nf x nloc x nnei x nnei x nh2. + """ + assert self.attn2_ev_apply is not None + # nf x nloc x nnei x nh2 + h2_1 = self.attn2_ev_apply(attn, h2) + return h2_1 + + def _update_g1_conv( + self, + gg1: paddle.Tensor, + g2: paddle.Tensor, + nlist_mask: paddle.Tensor, + sw: paddle.Tensor, + ) -> paddle.Tensor: + """ + Calculate the convolution update for atomic invariant rep. + + Parameters + ---------- + gg1 + Neighbor-wise atomic invariant rep, with shape nb x nloc x nnei x ng1. 
+ g2 + Pair invariant rep, with shape nb x nloc x nnei x ng2. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + """ + assert self.proj_g1g2 is not None + nb, nloc, nnei, _ = g2.shape + ng1 = gg1.shape[-1] + ng2 = g2.shape[-1] + if not self.g1_out_conv: + # gg1 : nb x nloc x nnei x ng2 + gg1 = self.proj_g1g2(gg1).reshape([nb, nloc, nnei, ng2]) + else: + gg1 = gg1.reshape([nb, nloc, nnei, ng1]) + # nb x nloc x nnei x ng2/ng1 + gg1 = _apply_nlist_mask(gg1, nlist_mask) + if not self.smooth: + # normalized by number of neighbors, not smooth + # nb x nloc x 1 + # must use astype here to convert bool to float, otherwise there will be numerical difference from numpy + invnnei = 1.0 / ( + self.epsilon + paddle.sum(nlist_mask.astype(gg1.dtype), axis=-1) + ).unsqueeze(-1) + else: + gg1 = _apply_switch(gg1, sw) + invnnei = (1.0 / float(nnei)) * paddle.ones( + (nb, nloc, 1), dtype=gg1.dtype + ).to(device=gg1.place) + if not self.g1_out_conv: + # nb x nloc x ng2 + g1_11 = paddle.sum(g2 * gg1, axis=2) * invnnei + else: + g2 = self.proj_g1g2(g2).reshape([nb, nloc, nnei, ng1]) + # nb x nloc x ng1 + g1_11 = paddle.sum(g2 * gg1, axis=2) * invnnei + return g1_11 + + @staticmethod + def _cal_hg( + g2: paddle.Tensor, + h2: paddle.Tensor, + nlist_mask: paddle.Tensor, + sw: paddle.Tensor, + smooth: bool = True, + epsilon: float = 1e-4, + use_sqrt_nnei: bool = True, + ) -> paddle.Tensor: + """ + Calculate the transposed rotation matrix. + + Parameters + ---------- + g2 + Neighbor-wise/Pair-wise invariant rep tensors, with shape nb x nloc x nnei x ng2. + h2 + Neighbor-wise/Pair-wise equivariant rep tensors, with shape nb x nloc x nnei x 3. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. 
+ sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + smooth + Whether to use smoothness in processes such as attention weights calculation. + epsilon + Protection of 1./nnei. + + Returns + ------- + hg + The transposed rotation matrix, with shape nb x nloc x 3 x ng2. + """ + # g2: nb x nloc x nnei x ng2 + # h2: nb x nloc x nnei x 3 + # msk: nb x nloc x nnei + nb, nloc, nnei, _ = g2.shape + ng2 = g2.shape[-1] + # nb x nloc x nnei x ng2 + g2 = _apply_nlist_mask(g2, nlist_mask) + if not smooth: + # nb x nloc + # must use type_as here to convert bool to float, otherwise there will be numerical difference from numpy + if not use_sqrt_nnei: + invnnei = 1.0 / (epsilon + paddle.sum(nlist_mask.type_as(g2), axis=-1)) + else: + invnnei = 1.0 / ( + epsilon + paddle.sqrt(paddle.sum(nlist_mask.type_as(g2), axis=-1)) + ) + # nb x nloc x 1 x 1 + invnnei = invnnei.unsqueeze(-1).unsqueeze(-1) + else: + g2 = _apply_switch(g2, sw) + if not use_sqrt_nnei: + invnnei = (1.0 / float(nnei)) * paddle.ones( + (nb, nloc, 1, 1), dtype=g2.dtype + ).to(device=g2.place) + else: + invnnei = paddle.rsqrt( + float(nnei) + * paddle.ones((nb, nloc, 1, 1), dtype=g2.dtype).to(device=g2.place) + ) + # nb x nloc x 3 x ng2 + h2g2 = paddle.matmul(paddle.transpose(h2, [0, 1, 3, 2]), g2) * invnnei + return h2g2 + + @staticmethod + def _cal_grrg(h2g2: paddle.Tensor, axis_neuron: int) -> paddle.Tensor: + """ + Calculate the atomic invariant rep. + + Parameters + ---------- + h2g2 + The transposed rotation matrix, with shape nb x nloc x 3 x ng2. + axis_neuron + Size of the submatrix. 
+ + Returns + ------- + grrg + Atomic invariant rep, with shape nb x nloc x (axis_neuron x ng2) + """ + # nb x nloc x 3 x ng2 + nb, nloc, _, ng2 = h2g2.shape + # nb x nloc x 3 x axis + # h2g2m = paddle.split(h2g2, decomp.sec(h2g2.shape[-1], axis_neuron), axis=-1)[0] + h2g2m = h2g2[..., :axis_neuron] # use slice instead of split + # nb x nloc x axis x ng2 + g1_13 = paddle.matmul(paddle.transpose(h2g2m, [0, 1, 3, 2]), h2g2) / (3.0**1) + # nb x nloc x (axisxng2) + g1_13 = g1_13.reshape([nb, nloc, axis_neuron * ng2]) + return g1_13 + + def symmetrization_op( + self, + g2: paddle.Tensor, + h2: paddle.Tensor, + nlist_mask: paddle.Tensor, + sw: paddle.Tensor, + axis_neuron: int, + smooth: bool = True, + epsilon: float = 1e-4, + ) -> paddle.Tensor: + """ + Symmetrization operator to obtain atomic invariant rep. + + Parameters + ---------- + g2 + Neighbor-wise/Pair-wise invariant rep tensors, with shape nb x nloc x nnei x ng2. + h2 + Neighbor-wise/Pair-wise equivariant rep tensors, with shape nb x nloc x nnei x 3. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + axis_neuron + Size of the submatrix. + smooth + Whether to use smoothness in processes such as attention weights calculation. + epsilon + Protection of 1./nnei. 
+ + Returns + ------- + grrg + Atomic invariant rep, with shape nb x nloc x (axis_neuron x ng2) + """ + # g2: nb x nloc x nnei x ng2 + # h2: nb x nloc x nnei x 3 + # msk: nb x nloc x nnei + nb, nloc, nnei, _ = g2.shape + # nb x nloc x 3 x ng2 + h2g2 = self._cal_hg( + g2, + h2, + nlist_mask, + sw, + smooth=smooth, + epsilon=epsilon, + use_sqrt_nnei=self.use_sqrt_nnei, + ) + # nb x nloc x (axisxng2) + g1_13 = self._cal_grrg(h2g2, axis_neuron) + return g1_13 + + def _update_g2_g1g1( + self, + g1: paddle.Tensor, # nb x nloc x ng1 + gg1: paddle.Tensor, # nb x nloc x nnei x ng1 + nlist_mask: paddle.Tensor, # nb x nloc x nnei + sw: paddle.Tensor, # nb x nloc x nnei + ) -> paddle.Tensor: + """ + Update the g2 using element-wise dot g1_i * g1_j. + + Parameters + ---------- + g1 + Atomic invariant rep, with shape nb x nloc x ng1. + gg1 + Neighbor-wise atomic invariant rep, with shape nb x nloc x nnei x ng1. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + """ + ret = g1.unsqueeze(-2) * gg1 + # nb x nloc x nnei x ng1 + ret = _apply_nlist_mask(ret, nlist_mask) + if self.smooth: + ret = _apply_switch(ret, sw) + return ret + + def forward( + self, + g1_ext: paddle.Tensor, # nf x nall x ng1 + g2: paddle.Tensor, # nf x nloc x nnei x ng2 + h2: paddle.Tensor, # nf x nloc x nnei x 3 + nlist: paddle.Tensor, # nf x nloc x nnei + nlist_mask: paddle.Tensor, # nf x nloc x nnei + sw: paddle.Tensor, # switch func, nf x nloc x nnei + ): + """ + Parameters + ---------- + g1_ext : nf x nall x ng1 extended single-atom channel + g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant + h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant + nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) + nlist_mask : nf x nloc x nnei masks of the neighbor list. 
real nei 1 otherwise 0 + sw : nf x nloc x nnei switch function + + Returns + ------- + g1: nf x nloc x ng1 updated single-atom channel + g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant + h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant + """ + cal_gg1 = ( + self.update_g1_has_drrd + or self.update_g1_has_conv + or self.update_g1_has_attn + or self.update_g2_has_g1g1 + ) + + nb, nloc, nnei, _ = g2.shape + nall = g1_ext.shape[1] + g1, _ = paddle.split(g1_ext, [nloc, nall - nloc], axis=1) + if paddle.in_dynamic_mode(): + assert [nb, nloc] == g1.shape[:2] + if paddle.in_dynamic_mode(): + assert [nb, nloc, nnei] == h2.shape[:3] + + g2_update: list[paddle.Tensor] = [g2] + h2_update: list[paddle.Tensor] = [h2] + g1_update: list[paddle.Tensor] = [g1] + g1_mlp: list[paddle.Tensor] = [g1] if not self.g1_out_mlp else [] + if self.g1_out_mlp: + if paddle.in_dynamic_mode(): + assert self.g1_self_mlp is not None + g1_self_mlp = self.act(self.g1_self_mlp(g1)) + g1_update.append(g1_self_mlp) + + if cal_gg1: + gg1 = _make_nei_g1(g1_ext, nlist) + else: + gg1 = None + + if self.update_chnnl_2: + # mlp(g2) + if paddle.in_dynamic_mode(): + assert self.linear2 is not None + # nb x nloc x nnei x ng2 + g2_1 = self.act(self.linear2(g2)) + g2_update.append(g2_1) + + if self.update_g2_has_g1g1: + # linear(g1_i * g1_j) + if paddle.in_dynamic_mode(): + assert gg1 is not None + if paddle.in_dynamic_mode(): + assert self.proj_g1g1g2 is not None + g2_update.append( + self.proj_g1g1g2(self._update_g2_g1g1(g1, gg1, nlist_mask, sw)) + ) + + if self.update_g2_has_attn or self.update_h2: + # gated_attention(g2, h2) + if paddle.in_dynamic_mode(): + assert self.attn2g_map is not None + # nb x nloc x nnei x nnei x nh + AAg = self.attn2g_map(g2, h2, nlist_mask, sw) + + if self.update_g2_has_attn: + if paddle.in_dynamic_mode(): + assert self.attn2_mh_apply is not None + if paddle.in_dynamic_mode(): + assert self.attn2_lm is not None + # nb x nloc x nnei x ng2 + g2_2 = 
self.attn2_mh_apply(AAg, g2) + g2_2 = self.attn2_lm(g2_2) + g2_update.append(g2_2) + + if self.update_h2: + # linear_head(attention_weights * h2) + h2_update.append(self._update_h2(h2, AAg)) + + if self.update_g1_has_conv: + if paddle.in_dynamic_mode(): + assert gg1 is not None + g1_conv = self._update_g1_conv(gg1, g2, nlist_mask, sw) + if not self.g1_out_conv: + g1_mlp.append(g1_conv) + else: + g1_update.append(g1_conv) + + if self.update_g1_has_grrg: + g1_mlp.append( + self.symmetrization_op( + g2, + h2, + nlist_mask, + sw, + self.axis_neuron, + smooth=self.smooth, + epsilon=self.epsilon, + ) + ) + + if self.update_g1_has_drrd: + if paddle.in_dynamic_mode(): + assert gg1 is not None + g1_mlp.append( + self.symmetrization_op( + gg1, + h2, + nlist_mask, + sw, + self.axis_neuron, + smooth=self.smooth, + epsilon=self.epsilon, + ) + ) + + # nb x nloc x [ng1+ng2+(axisxng2)+(axisxng1)] + # conv grrg drrd + g1_1 = self.act(self.linear1(paddle.concat(g1_mlp, axis=-1))) + g1_update.append(g1_1) + + if self.update_g1_has_attn: + assert gg1 is not None + assert self.loc_attn is not None + g1_update.append(self.loc_attn(g1, gg1, nlist_mask, sw)) + + # update + if self.update_chnnl_2: + g2_new = self.list_update(g2_update, "g2") + h2_new = self.list_update(h2_update, "h2") + else: + g2_new, h2_new = g2, h2 + g1_new = self.list_update(g1_update, "g1") + return g1_new, g2_new, h2_new + + def list_update_res_avg( + self, + update_list: list[paddle.Tensor], + ) -> paddle.Tensor: + nitem = len(update_list) + uu = update_list[0] + for ii in range(1, nitem): + uu = uu + update_list[ii] + return uu / (float(nitem) ** 0.5) + + def list_update_res_incr(self, update_list: list[paddle.Tensor]) -> paddle.Tensor: + nitem = len(update_list) + uu = update_list[0] + scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 + for ii in range(1, nitem): + uu = uu + scale * update_list[ii] + return uu + + def list_update_res_residual( + self, update_list: list[paddle.Tensor], update_name: str 
= "g1" + ) -> paddle.Tensor: + nitem = len(update_list) + uu = update_list[0] + # make jit happy + if update_name == "g1": + for ii, vv in enumerate(self.g1_residual): + uu = uu + vv * update_list[ii + 1] + elif update_name == "g2": + for ii, vv in enumerate(self.g2_residual): + uu = uu + vv * update_list[ii + 1] + elif update_name == "h2": + for ii, vv in enumerate(self.h2_residual): + uu = uu + vv * update_list[ii + 1] + else: + raise NotImplementedError + return uu + + def list_update( + self, update_list: list[paddle.Tensor], update_name: str = "g1" + ) -> paddle.Tensor: + if self.update_style == "res_avg": + return self.list_update_res_avg(update_list) + elif self.update_style == "res_incr": + return self.list_update_res_incr(update_list) + elif self.update_style == "res_residual": + return self.list_update_res_residual(update_list, update_name=update_name) + else: + raise RuntimeError(f"unknown update style {self.update_style}") + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. 
+ """ + data = { + "@class": "RepformerLayer", + "@version": 2, + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel, + "ntypes": self.ntypes, + "g1_dim": self.g1_dim, + "g2_dim": self.g2_dim, + "axis_neuron": self.axis_neuron, + "update_chnnl_2": self.update_chnnl_2, + "update_g1_has_conv": self.update_g1_has_conv, + "update_g1_has_drrd": self.update_g1_has_drrd, + "update_g1_has_grrg": self.update_g1_has_grrg, + "update_g1_has_attn": self.update_g1_has_attn, + "update_g2_has_g1g1": self.update_g2_has_g1g1, + "update_g2_has_attn": self.update_g2_has_attn, + "update_h2": self.update_h2, + "attn1_hidden": self.attn1_hidden, + "attn1_nhead": self.attn1_nhead, + "attn2_hidden": self.attn2_hidden, + "attn2_nhead": self.attn2_nhead, + "attn2_has_gate": self.attn2_has_gate, + "activation_function": self.activation_function, + "update_style": self.update_style, + "smooth": self.smooth, + "precision": self.precision, + "trainable_ln": self.trainable_ln, + "use_sqrt_nnei": self.use_sqrt_nnei, + "g1_out_conv": self.g1_out_conv, + "g1_out_mlp": self.g1_out_mlp, + "ln_eps": self.ln_eps, + "linear1": self.linear1.serialize(), + } + if self.update_chnnl_2: + data.update( + { + "linear2": self.linear2.serialize(), + } + ) + if self.update_g1_has_conv: + data.update( + { + "proj_g1g2": self.proj_g1g2.serialize(), + } + ) + if self.update_g2_has_g1g1: + data.update( + { + "proj_g1g1g2": self.proj_g1g1g2.serialize(), + } + ) + if self.update_g2_has_attn or self.update_h2: + data.update( + { + "attn2g_map": self.attn2g_map.serialize(), + } + ) + if self.update_g2_has_attn: + data.update( + { + "attn2_mh_apply": self.attn2_mh_apply.serialize(), + "attn2_lm": self.attn2_lm.serialize(), + } + ) + + if self.update_h2: + data.update( + { + "attn2_ev_apply": self.attn2_ev_apply.serialize(), + } + ) + if self.update_g1_has_attn: + data.update( + { + "loc_attn": self.loc_attn.serialize(), + } + ) + if self.g1_out_mlp: + data.update( + { + "g1_self_mlp": 
self.g1_self_mlp.serialize(), + } + ) + if self.update_style == "res_residual": + data.update( + { + "@variables": { + "g1_residual": [to_numpy_array(t) for t in self.g1_residual], + "g2_residual": [to_numpy_array(t) for t in self.g2_residual], + "h2_residual": [to_numpy_array(t) for t in self.h2_residual], + } + } + ) + return data + + @classmethod + def deserialize(cls, data: dict) -> "RepformerLayer": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 2, 1) + data.pop("@class") + linear1 = data.pop("linear1") + update_chnnl_2 = data["update_chnnl_2"] + update_g1_has_conv = data["update_g1_has_conv"] + update_g2_has_g1g1 = data["update_g2_has_g1g1"] + update_g2_has_attn = data["update_g2_has_attn"] + update_h2 = data["update_h2"] + update_g1_has_attn = data["update_g1_has_attn"] + update_style = data["update_style"] + g1_out_mlp = data["g1_out_mlp"] + + linear2 = data.pop("linear2", None) + proj_g1g2 = data.pop("proj_g1g2", None) + proj_g1g1g2 = data.pop("proj_g1g1g2", None) + attn2g_map = data.pop("attn2g_map", None) + attn2_mh_apply = data.pop("attn2_mh_apply", None) + attn2_lm = data.pop("attn2_lm", None) + attn2_ev_apply = data.pop("attn2_ev_apply", None) + loc_attn = data.pop("loc_attn", None) + g1_self_mlp = data.pop("g1_self_mlp", None) + variables = data.pop("@variables", {}) + g1_residual = variables.get("g1_residual", data.pop("g1_residual", [])) + g2_residual = variables.get("g2_residual", data.pop("g2_residual", [])) + h2_residual = variables.get("h2_residual", data.pop("h2_residual", [])) + + obj = cls(**data) + obj.linear1 = MLPLayer.deserialize(linear1) + if update_chnnl_2: + assert isinstance(linear2, dict) + obj.linear2 = MLPLayer.deserialize(linear2) + if update_g1_has_conv: + assert isinstance(proj_g1g2, dict) + obj.proj_g1g2 = MLPLayer.deserialize(proj_g1g2) + if update_g2_has_g1g1: + assert 
isinstance(proj_g1g1g2, dict) + obj.proj_g1g1g2 = MLPLayer.deserialize(proj_g1g1g2) + if update_g2_has_attn or update_h2: + assert isinstance(attn2g_map, dict) + obj.attn2g_map = Atten2Map.deserialize(attn2g_map) + if update_g2_has_attn: + assert isinstance(attn2_mh_apply, dict) + assert isinstance(attn2_lm, dict) + obj.attn2_mh_apply = Atten2MultiHeadApply.deserialize(attn2_mh_apply) + obj.attn2_lm = LayerNorm.deserialize(attn2_lm) + if update_h2: + assert isinstance(attn2_ev_apply, dict) + obj.attn2_ev_apply = Atten2EquiVarApply.deserialize(attn2_ev_apply) + if update_g1_has_attn: + assert isinstance(loc_attn, dict) + obj.loc_attn = LocalAtten.deserialize(loc_attn) + if g1_out_mlp: + assert isinstance(g1_self_mlp, dict) + obj.g1_self_mlp = MLPLayer.deserialize(g1_self_mlp) + if update_style == "res_residual": + for ii, t in enumerate(obj.g1_residual): + t.data = to_paddle_tensor(g1_residual[ii]) + for ii, t in enumerate(obj.g2_residual): + t.data = to_paddle_tensor(g2_residual[ii]) + for ii, t in enumerate(obj.h2_residual): + t.data = to_paddle_tensor(h2_residual[ii]) + return obj diff --git a/deepmd/pd/model/descriptor/repformers.py b/deepmd/pd/model/descriptor/repformers.py new file mode 100644 index 0000000000..9340f60067 --- /dev/null +++ b/deepmd/pd/model/descriptor/repformers.py @@ -0,0 +1,580 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor.descriptor import ( + DescriptorBlock, +) +from deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.model.network.mlp import ( + MLPLayer, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.pd.utils.spin import ( + 
concat_switch_virtual, +) +from deepmd.pd.utils.utils import ( + ActivationFn, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) + +from .repformer_layer import ( + RepformerLayer, +) + + +@DescriptorBlock.register("se_repformer") +@DescriptorBlock.register("se_uni") +class DescrptBlockRepformers(DescriptorBlock): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + nlayers: int = 3, + g1_dim=128, + g2_dim=16, + axis_neuron: int = 4, + direct_dist: bool = False, + update_g1_has_conv: bool = True, + update_g1_has_drrd: bool = True, + update_g1_has_grrg: bool = True, + update_g1_has_attn: bool = True, + update_g2_has_g1g1: bool = True, + update_g2_has_attn: bool = True, + update_h2: bool = False, + attn1_hidden: int = 64, + attn1_nhead: int = 4, + attn2_hidden: int = 16, + attn2_nhead: int = 4, + attn2_has_gate: bool = False, + activation_function: str = "tanh", + update_style: str = "res_avg", + update_residual: float = 0.001, + update_residual_init: str = "norm", + set_davg_zero: bool = True, + smooth: bool = True, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + precision: str = "float64", + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + seed: Optional[Union[int, list[int]]] = None, + use_sqrt_nnei: bool = True, + g1_out_conv: bool = True, + g1_out_mlp: bool = True, + ) -> None: + r""" + The repformer descriptor block. + + Parameters + ---------- + rcut : float + The cut-off radius. + rcut_smth : float + Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth. + sel : int + Maximally possible number of selected neighbors. + ntypes : int + Number of element types + nlayers : int, optional + Number of repformer layers. + g1_dim : int, optional + Dimension of the first graph convolution layer. + g2_dim : int, optional + Dimension of the second graph convolution layer. 
+ axis_neuron : int, optional + Size of the submatrix of G (embedding matrix). + direct_dist : bool, optional + Whether to use direct distance information (1/r term) in the repformer block. + update_g1_has_conv : bool, optional + Whether to update the g1 rep with convolution term. + update_g1_has_drrd : bool, optional + Whether to update the g1 rep with the drrd term. + update_g1_has_grrg : bool, optional + Whether to update the g1 rep with the grrg term. + update_g1_has_attn : bool, optional + Whether to update the g1 rep with the localized self-attention. + update_g2_has_g1g1 : bool, optional + Whether to update the g2 rep with the g1xg1 term. + update_g2_has_attn : bool, optional + Whether to update the g2 rep with the gated self-attention. + update_h2 : bool, optional + Whether to update the h2 rep. + attn1_hidden : int, optional + The hidden dimension of localized self-attention to update the g1 rep. + attn1_nhead : int, optional + The number of heads in localized self-attention to update the g1 rep. + attn2_hidden : int, optional + The hidden dimension of gated self-attention to update the g2 rep. + attn2_nhead : int, optional + The number of heads in gated self-attention to update the g2 rep. + attn2_has_gate : bool, optional + Whether to use gate in the gated self-attention to update the g2 rep. + activation_function : str, optional + The activation function in the embedding net. + update_style : str, optional + Style to update a representation. + Supported options are: + -'res_avg': Updates a rep `u` with: u = 1/\\sqrt{n+1} (u + u_1 + u_2 + ... + u_n) + -'res_incr': Updates a rep `u` with: u = u + 1/\\sqrt{n} (u_1 + u_2 + ... + u_n) + -'res_residual': Updates a rep `u` with: u = u + (r1*u_1 + r2*u_2 + ... + r3*u_n) + where `r1`, `r2` ... `r3` are residual weights defined by `update_residual` + and `update_residual_init`. + update_residual : float, optional + When update using residual mode, the initial std of residual vector weights. 
+ update_residual_init : str, optional + When update using residual mode, the initialization mode of residual vector weights. + set_davg_zero : bool, optional + Set the normalization average to zero. + precision : str, optional + The precision of the embedding net parameters. + smooth : bool, optional + Whether to use smoothness in processes such as attention weights calculation. + exclude_types : list[list[int]], optional + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + env_protection : float, optional + Protection parameter to prevent division by zero errors during environment matrix calculations. + For example, when using paddings, there may be zero distances of neighbors, which may make division by zero error during environment matrix calculations without protection. + trainable_ln : bool, optional + Whether to use trainable shift and scale weights in layer normalization. + use_sqrt_nnei : bool, optional + Whether to use the square root of the number of neighbors for symmetrization_op normalization instead of using the number of neighbors directly. + g1_out_conv : bool, optional + Whether to put the convolutional update of g1 separately outside the concatenated MLP update. + g1_out_mlp : bool, optional + Whether to put the self MLP update of g1 separately outside the concatenated MLP update. + ln_eps : float, optional + The epsilon value for layer normalization. + seed : int, optional + Random seed for parameter initialization. + """ + super().__init__() + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.ntypes = ntypes + self.nlayers = nlayers + sel = [sel] if isinstance(sel, int) else sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 # use full descriptor. 
+ assert len(sel) == 1 + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.axis_neuron = axis_neuron + self.set_davg_zero = set_davg_zero + self.g1_dim = g1_dim + self.g2_dim = g2_dim + self.update_g1_has_conv = update_g1_has_conv + self.update_g1_has_drrd = update_g1_has_drrd + self.update_g1_has_grrg = update_g1_has_grrg + self.update_g1_has_attn = update_g1_has_attn + self.update_g2_has_g1g1 = update_g2_has_g1g1 + self.update_g2_has_attn = update_g2_has_attn + self.update_h2 = update_h2 + self.attn1_hidden = attn1_hidden + self.attn1_nhead = attn1_nhead + self.attn2_has_gate = attn2_has_gate + self.attn2_hidden = attn2_hidden + self.attn2_nhead = attn2_nhead + self.activation_function = activation_function + self.update_style = update_style + self.update_residual = update_residual + self.update_residual_init = update_residual_init + self.direct_dist = direct_dist + self.act = ActivationFn(activation_function) + self.smooth = smooth + self.use_sqrt_nnei = use_sqrt_nnei + self.g1_out_conv = g1_out_conv + self.g1_out_mlp = g1_out_mlp + # order matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + self.env_protection = env_protection + self.precision = precision + self.prec = PRECISION_DICT[precision] + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.epsilon = 1e-4 + self.seed = seed + + self.g2_embd = MLPLayer( + 1, self.g2_dim, precision=precision, seed=child_seed(seed, 0) + ) + layers = [] + for ii in range(nlayers): + layers.append( + RepformerLayer( + self.rcut, + self.rcut_smth, + self.sel, + self.ntypes, + self.g1_dim, + self.g2_dim, + axis_neuron=self.axis_neuron, + update_chnnl_2=(ii != nlayers - 1), + update_g1_has_conv=self.update_g1_has_conv, + update_g1_has_drrd=self.update_g1_has_drrd, + update_g1_has_grrg=self.update_g1_has_grrg, + update_g1_has_attn=self.update_g1_has_attn, + update_g2_has_g1g1=self.update_g2_has_g1g1, + update_g2_has_attn=self.update_g2_has_attn, + 
update_h2=self.update_h2, + attn1_hidden=self.attn1_hidden, + attn1_nhead=self.attn1_nhead, + attn2_has_gate=self.attn2_has_gate, + attn2_hidden=self.attn2_hidden, + attn2_nhead=self.attn2_nhead, + activation_function=self.activation_function, + update_style=self.update_style, + update_residual=self.update_residual, + update_residual_init=self.update_residual_init, + smooth=self.smooth, + trainable_ln=self.trainable_ln, + ln_eps=self.ln_eps, + precision=precision, + use_sqrt_nnei=self.use_sqrt_nnei, + g1_out_conv=self.g1_out_conv, + g1_out_mlp=self.g1_out_mlp, + seed=child_seed(child_seed(seed, 1), ii), + ) + ) + self.layers = paddle.nn.LayerList(layers) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + stddev = paddle.ones(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.stats = None + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def get_dim_emb(self) -> int: + """Returns the embedding dimension g2.""" + return self.g2_dim + + def __setitem__(self, key, value) -> None: + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", 
"data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.g1_dim + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.g1_dim + + @property + def dim_emb(self): + """Returns the embedding dimension g2.""" + return self.get_dim_emb() + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ) -> None: + self.exclude_types = exclude_types + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + if comm_dict is None: + assert mapping is not None + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + atype = extended_atype[:, :nloc] + # nb x nloc x nnei + exclude_mask = self.emask(nlist, extended_atype) + nlist = paddle.where(exclude_mask != 0, 
nlist, paddle.full_like(nlist, -1)) + # nb x nloc x nnei x 4, nb x nloc x nnei x 3, nb x nloc x nnei x 1 + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + nlist_mask = nlist != -1 + sw = paddle.squeeze(sw, -1) + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + + # [nframes, nloc, tebd_dim] + if comm_dict is None: + if paddle.in_dynamic_mode(): + assert isinstance(extended_atype_embd, paddle.Tensor) # for jit + atype_embd = extended_atype_embd[:, :nloc, :] + if paddle.in_dynamic_mode(): + assert list(atype_embd.shape) == [nframes, nloc, self.g1_dim] + else: + atype_embd = extended_atype_embd + if paddle.in_dynamic_mode(): + assert isinstance(atype_embd, paddle.Tensor) # for jit + g1 = self.act(atype_embd) + ng1 = g1.shape[-1] + # nb x nloc x nnei x 1, nb x nloc x nnei x 3 + if not self.direct_dist: + g2, h2 = paddle.split(dmatrix, [1, 3], axis=-1) + else: + # g2, h2 = paddle.linalg.norm(diff, axis=-1, keepdim=True), diff + g2, h2 = decomp.norm(diff, axis=-1, keepdim=True), diff + g2 = g2 / self.rcut + h2 = h2 / self.rcut + # nb x nloc x nnei x ng2 + g2 = self.act(self.g2_embd(g2)) + + # set all padding positions to index of 0 + # if the a neighbor is real or not is indicated by nlist_mask + nlist[nlist == -1] = 0 + # nb x nall x ng1 + if comm_dict is None: + assert mapping is not None + mapping = ( + mapping.reshape([nframes, nall]) + .unsqueeze(-1) + .expand([-1, -1, self.g1_dim]) + ) + for idx, ll in enumerate(self.layers): + # g1: nb x nloc x ng1 + # g1_ext: nb x nall x ng1 + if comm_dict is None: + assert mapping is not None + g1_ext = decomp.take_along_axis(g1, axis=1, indices=mapping) + else: + raise NotImplementedError("Not impl yet") + has_spin = "has_spin" in comm_dict + if not has_spin: + n_padding = nall - nloc + g1 = paddle.nn.functional.pad( + g1.squeeze(0), (0, 0, 0, n_padding), value=0.0 + ) + real_nloc 
= nloc + real_nall = nall + else: + # for spin + real_nloc = nloc // 2 + real_nall = nall // 2 + real_n_padding = real_nall - real_nloc + g1_real, g1_virtual = paddle.split( + g1, [real_nloc, real_nloc], axis=1 + ) + # mix_g1: nb x real_nloc x (ng1 * 2) + mix_g1 = paddle.concat([g1_real, g1_virtual], axis=2) + # nb x real_nall x (ng1 * 2) + g1 = paddle.nn.functional.pad( + mix_g1.squeeze(0), (0, 0, 0, real_n_padding), value=0.0 + ) + + assert "send_list" in comm_dict + assert "send_proc" in comm_dict + assert "recv_proc" in comm_dict + assert "send_num" in comm_dict + assert "recv_num" in comm_dict + assert "communicator" in comm_dict + ret = paddle.ops.deepmd.border_op( + comm_dict["send_list"], + comm_dict["send_proc"], + comm_dict["recv_proc"], + comm_dict["send_num"], + comm_dict["recv_num"], + g1, + comm_dict["communicator"], + paddle.to_tensor( + real_nloc, + dtype=paddle.int32, + place=env.DEVICE, + ), # should be int of c++ + paddle.to_tensor( + real_nall - real_nloc, + dtype=paddle.int32, + place=env.DEVICE, + ), # should be int of c++ + ) + g1_ext = ret[0].unsqueeze(0) + if has_spin: + g1_real_ext, g1_virtual_ext = paddle.split( + g1_ext, [ng1, ng1], axis=2 + ) + g1_ext = concat_switch_virtual( + g1_real_ext, g1_virtual_ext, real_nloc + ) + g1, g2, h2 = ll.forward( + g1_ext, + g2, + h2, + nlist, + nlist_mask, + sw, + ) + + # nb x nloc x 3 x ng2 + h2g2 = RepformerLayer._cal_hg( + g2, + h2, + nlist_mask, + sw, + smooth=self.smooth, + epsilon=self.epsilon, + use_sqrt_nnei=self.use_sqrt_nnei, + ) + # (nb x nloc) x ng2 x 3 + rot_mat = paddle.transpose(h2g2, (0, 1, 3, 2)) + + return g1, g2, h2, rot_mat.reshape([nframes, nloc, self.dim_emb, 3]), sw + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. 
+ + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. + + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." 
+ ) + return self.stats + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return True + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False diff --git a/deepmd/pd/model/descriptor/se_t_tebd.py b/deepmd/pd/model/descriptor/se_t_tebd.py new file mode 100644 index 0000000000..31fb06045e --- /dev/null +++ b/deepmd/pd/model/descriptor/se_t_tebd.py @@ -0,0 +1,932 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor import ( + DescriptorBlock, +) +from deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, + NetworkCollection, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, + TypeEmbedNetConsistent, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + extend_descrpt_stat, +) + + +@BaseDescriptor.register("se_e3_tebd") +class DescrptSeTTebd(BaseDescriptor, paddle.nn.Layer): + r"""Construct an embedding net that takes angles between two 
neighboring atoms and type embeddings as input. + + Parameters + ---------- + rcut + The cut-off radius + rcut_smth + From where the environment matrix should be smoothed + sel : Union[list[int], int] + list[int]: sel[i] specifies the maximum number of type i atoms in the cut-off radius + int: the total maximum number of atoms in the cut-off radius + ntypes : int + Number of element types + neuron : list[int] + Number of neurons in each hidden layer of the embedding net + tebd_dim : int + Dimension of the type embedding + tebd_input_mode : str + The input mode of the type embedding. Supported modes are ["concat", "strip"]. + - "concat": Concatenate the type embedding with the smoothed angular information as the union input for the embedding network. + - "strip": Use a separated embedding network for the type embedding and combine the output with the angular embedding network output. + resnet_dt + Time-step `dt` in the resnet construction: + y = x + dt * \phi (Wx + b) + set_davg_zero + Set the shift of embedding net input to zero. + activation_function + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + env_protection: float + Protection parameter to prevent division by zero errors during environment matrix calculations. + exclude_types : list[tuple[int, int]] + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + precision + The precision of the embedding net parameters. Supported options are |PRECISION| + trainable + If the weights of embedding net are trainable. + seed + Random seed for initializing the network parameters. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. + concat_output_tebd: bool + Whether to concat type embedding at the output of the descriptor. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. 
+ use_tebd_bias : bool, Optional + Whether to use bias in the type embedding layer. + smooth: bool + Whether to use smooth process in calculation. + + """ + + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [2, 4, 8], + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + resnet_dt: bool = False, + set_davg_zero: bool = True, + activation_function: str = "tanh", + env_protection: float = 0.0, + exclude_types: list[tuple[int, int]] = [], + precision: str = "float64", + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, + concat_output_tebd: bool = True, + use_econf_tebd: bool = False, + use_tebd_bias=False, + smooth: bool = True, + ) -> None: + super().__init__() + self.se_ttebd = DescrptBlockSeTTebd( + rcut, + rcut_smth, + sel, + ntypes, + neuron=neuron, + tebd_dim=tebd_dim, + tebd_input_mode=tebd_input_mode, + set_davg_zero=set_davg_zero, + activation_function=activation_function, + precision=precision, + resnet_dt=resnet_dt, + exclude_types=exclude_types, + env_protection=env_protection, + smooth=smooth, + seed=child_seed(seed, 1), + ) + self.prec = PRECISION_DICT[precision] + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map + self.smooth = smooth + self.type_embedding = TypeEmbedNet( + ntypes, + tebd_dim, + precision=precision, + seed=child_seed(seed, 2), + use_econf_tebd=use_econf_tebd, + type_map=type_map, + use_tebd_bias=use_tebd_bias, + ) + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.concat_output_tebd = concat_output_tebd + self.trainable = trainable + # set trainable + for param in self.parameters(): + param.stop_gradient = not trainable + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.se_ttebd.get_rcut() + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return 
self.se_ttebd.get_rcut_smth() + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return self.se_ttebd.get_nsel() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.se_ttebd.get_sel() + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.se_ttebd.get_ntypes() + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + ret = self.se_ttebd.get_dim_out() + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + def get_dim_emb(self) -> int: + return self.se_ttebd.dim_emb + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return self.se_ttebd.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.se_ttebd.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return self.se_ttebd.need_sorted_nlist_for_lower() + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.se_ttebd.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False) -> None: + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. 
+ """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For DPA1 descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in both type_embedding and se_ttebd + if shared_level == 0: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.se_ttebd.share_params(base_class.se_ttebd, 0, resume=resume) + # shared_level: 1 + # share all parameters in type_embedding + elif shared_level == 1: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + # Other shared levels + else: + raise NotImplementedError + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + return self.se_ttebd.compute_input_stats(merged, path) + + def set_stat_mean_and_stddev( + self, + mean: paddle.Tensor, + stddev: paddle.Tensor, + ) -> None: + """Update mean and stddev for descriptor.""" + self.se_ttebd.mean = mean + self.se_ttebd.stddev = stddev + + def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]: + """Get mean and stddev for descriptor.""" + return self.se_ttebd.mean, self.se_ttebd.stddev + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + obj = self.se_ttebd + obj.ntypes = len(type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + obj.reinit_exclude(map_pair_exclude_types(obj.exclude_types, remap_index)) + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + obj, + type_map, + des_with_stat=model_with_new_type_stat.se_ttebd + if model_with_new_type_stat is not None + else None, + ) + obj["davg"] = obj["davg"][remap_index] + obj["dstd"] = obj["dstd"][remap_index] + + def serialize(self) -> dict: + obj = self.se_ttebd + data = { + "@class": "Descriptor", + "type": "se_e3_tebd", + "@version": 1, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "ntypes": obj.ntypes, + "neuron": obj.neuron, + "tebd_dim": obj.tebd_dim, + "tebd_input_mode": obj.tebd_input_mode, + "set_davg_zero": obj.set_davg_zero, + "activation_function": obj.activation_function, + "resnet_dt": obj.resnet_dt, + "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": 
self.use_econf_tebd, + "type_map": self.type_map, + # make deterministic + "precision": RESERVED_PRECISON_DICT[obj.prec], + "embeddings": obj.filter_layers.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "type_embedding": self.type_embedding.embedding.serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "smooth": self.smooth, + "@variables": { + "davg": obj["davg"].numpy(), + "dstd": obj["dstd"].numpy(), + }, + "trainable": self.trainable, + } + if obj.tebd_input_mode in ["strip"]: + data.update({"embeddings_strip": obj.filter_layers_strip.serialize()}) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptSeTTebd": + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + type_embedding = data.pop("type_embedding") + env_mat = data.pop("env_mat") + tebd_input_mode = data["tebd_input_mode"] + if tebd_input_mode in ["strip"]: + embeddings_strip = data.pop("embeddings_strip") + else: + embeddings_strip = None + obj = cls(**data) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.se_ttebd.prec).to(device=env.DEVICE) + + obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize( + type_embedding + ) + obj.se_ttebd["davg"] = t_cvt(variables["davg"]) + obj.se_ttebd["dstd"] = t_cvt(variables["dstd"]) + obj.se_ttebd.filter_layers = NetworkCollection.deserialize(embeddings) + if tebd_input_mode in ["strip"]: + obj.se_ttebd.filter_layers_strip = NetworkCollection.deserialize( + embeddings_strip + ) + return obj + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + extended_coord + The extended coordinates of atoms. 
shape: nf x (nallx3) + extended_atype + The extended atom types. shape: nf x nall + nlist + The neighbor list. shape: nf x nloc x nnei + mapping + The index mapping, not required by this descriptor. + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-particle representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-particle representation. + shape: nf x nloc x nnei x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + # cast the input to internal precision + extended_coord = extended_coord.to(dtype=self.prec) + del mapping + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + if self.tebd_input_mode in ["strip"]: + type_embedding = self.type_embedding.get_full_embedding(g1_ext.place) + else: + type_embedding = None + g1, _, _, _, sw = self.se_ttebd( + nlist, + extended_coord, + extended_atype, + g1_ext, + mapping=None, + type_embedding=type_embedding, + ) + if self.concat_output_tebd: + g1 = paddle.concat([g1, g1_inp], axis=-1) + + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + None, + None, + None, + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. 
+ + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + min_nbor_dist, sel = UpdateSel().update_one_sel( + train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], True + ) + local_jdata_cpy["sel"] = sel[0] + return local_jdata_cpy, min_nbor_dist + + +@DescriptorBlock.register("se_ttebd") +class DescrptBlockSeTTebd(DescriptorBlock): + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [25, 50, 100], + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + set_davg_zero: bool = True, + activation_function="tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + smooth: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.neuron = neuron + self.filter_neuron = self.neuron + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.set_davg_zero = set_davg_zero + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.resnet_dt = resnet_dt + self.env_protection = env_protection + self.seed = seed + self.smooth = smooth + + if isinstance(sel, int): + sel = [sel] + + self.ntypes = ntypes + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + # order matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, 
dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + stddev = paddle.ones(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.tebd_dim_input = self.tebd_dim * 2 + if self.tebd_input_mode in ["concat"]: + self.embd_input_dim = 1 + self.tebd_dim_input + else: + self.embd_input_dim = 1 + + self.filter_layers = None + self.filter_layers_strip = None + filter_layers = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers[0] = EmbeddingNet( + self.embd_input_dim, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 1), + ) + self.filter_layers = filter_layers + if self.tebd_input_mode in ["strip"]: + filter_layers_strip = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers_strip[0] = EmbeddingNet( + self.tebd_dim_input, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 2), + ) + self.filter_layers_strip = filter_layers_strip + self.stats = None + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def get_dim_out(self) -> int: + """Returns the 
output dimension.""" + return self.dim_out + + def get_dim_emb(self) -> int: + """Returns the output dimension of embedding.""" + return self.filter_neuron[-1] + + def __setitem__(self, key, value) -> None: + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.tebd_dim + + @property + def dim_emb(self): + """Returns the output dimension of embedding.""" + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. 
+ - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. + + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." + ) + return self.stats + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ) -> None: + self.exclude_types = exclude_types + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + nlist + The neighbor list. shape: nf x nloc x nnei + extended_coord + The extended coordinates of atoms. shape: nf x (nallx3) + extended_atype + The extended aotm types. shape: nf x nall x nt + extended_atype_embd + The extended type embedding of atoms. 
shape: nf x nall + mapping + The index mapping, not required by this descriptor. + type_embedding + Full type embeddings. shape: (ntypes+1) x nt + Required for stripped type embeddings. + + Returns + ------- + result + The descriptor. shape: nf x nloc x (ng x axis_neuron) + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + del mapping + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + nb = nframes + nall = extended_coord.reshape([nb, -1, 3]).shape[1] + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + # nb x nloc x nnei + exclude_mask = self.emask(nlist, extended_atype) + nlist = paddle.where(exclude_mask != 0, nlist, paddle.full_like(nlist, -1)) + nlist_mask = nlist != -1 + nlist = paddle.where(nlist == -1, paddle.zeros_like(nlist), nlist) + sw = paddle.squeeze(sw, -1) + # nf x nall x nt + nt = extended_atype_embd.shape[-1] + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + # (nb x nloc) x nnei + exclude_mask = exclude_mask.reshape([nb * nloc, nnei]) + assert self.filter_layers is not None + # nfnl x nnei x 4 + dmatrix = dmatrix.reshape([-1, self.nnei, 4]) + nfnl = dmatrix.shape[0] + # nfnl x nnei x 4 + rr = dmatrix + rr = rr * exclude_mask[:, :, None].astype(rr.dtype) + + # nfnl x nt_i x 3 + rr_i = rr[:, :, 1:] + # nfnl x nt_j x 3 + rr_j = rr[:, :, 1:] + # nfnl x nt_i x nt_j + # env_ij = paddle.einsum("ijm,ikm->ijk", rr_i, rr_j) + env_ij = ( + # ij1m x i1km -> ijkm -> ijk + rr_i.unsqueeze(2) * rr_j.unsqueeze(1) + ).sum(-1) + # 
nfnl x nt_i x nt_j x 1 + ss = env_ij.unsqueeze(-1) + if self.tebd_input_mode in ["concat"]: + atype_tebd_ext = extended_atype_embd + # nb x (nloc x nnei) x nt + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) + # nb x (nloc x nnei) x nt + # atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, index=index) + atype_tebd_nlist = decomp.take_along_axis( + atype_tebd_ext, axis=1, indices=index + ) + # nb x nloc x nnei x nt + atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) + # nfnl x nnei x tebd_dim + nlist_tebd = atype_tebd_nlist.reshape([nfnl, nnei, self.tebd_dim]) + # nfnl x nt_i x nt_j x tebd_dim + nlist_tebd_i = nlist_tebd.unsqueeze(2).expand([-1, -1, self.nnei, -1]) + nlist_tebd_j = nlist_tebd.unsqueeze(1).expand([-1, self.nnei, -1, -1]) + # nfnl x nt_i x nt_j x (1 + tebd_dim * 2) + ss = paddle.concat([ss, nlist_tebd_i, nlist_tebd_j], axis=-1) + # nfnl x nt_i x nt_j x ng + gg = self.filter_layers.networks[0](ss) + elif self.tebd_input_mode in ["strip"]: + # nfnl x nt_i x nt_j x ng + gg_s = self.filter_layers.networks[0](ss) + assert self.filter_layers_strip is not None + assert type_embedding is not None + ng = self.filter_neuron[-1] + ntypes_with_padding = type_embedding.shape[0] + # nf x (nl x nnei) + nlist_index = nlist.reshape([nb, nloc * nnei]) + # nf x (nl x nnei) + nei_type = decomp.take_along_axis( + extended_atype, indices=nlist_index, axis=1 + ) + # nfnl x nnei + nei_type = nei_type.reshape([nfnl, nnei]) + # nfnl x nnei x nnei + nei_type_i = nei_type.unsqueeze(2).expand([-1, -1, nnei]) + nei_type_j = nei_type.unsqueeze(1).expand([-1, nnei, -1]) + idx_i = nei_type_i * ntypes_with_padding + idx_j = nei_type_j + # (nf x nl x nt_i x nt_j) x ng + idx = ( + (idx_i + idx_j) + .reshape([-1, 1]) + .expand([-1, ng]) + .astype(paddle.int64) + .to(paddle.int64) + ) + # ntypes * (ntypes) * nt + type_embedding_i = paddle.tile( + type_embedding.reshape([ntypes_with_padding, 1, nt]), + [1, ntypes_with_padding, 
1], + ) + # (ntypes) * ntypes * nt + type_embedding_j = paddle.tile( + type_embedding.reshape([1, ntypes_with_padding, nt]), + [ntypes_with_padding, 1, 1], + ) + # (ntypes * ntypes) * (nt+nt) + two_side_type_embedding = paddle.concat( + [type_embedding_i, type_embedding_j], -1 + ).reshape(-1, nt * 2) + tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) + # (nfnl x nt_i x nt_j) x ng + gg_t = decomp.take_along_axis(tt_full, indices=idx, axis=0) + # (nfnl x nt_i x nt_j) x ng + gg_t = gg_t.reshape(nfnl, nnei, nnei, ng) + if self.smooth: + gg_t = ( + gg_t + * sw.reshape([nfnl, self.nnei, 1, 1]) + * sw.reshape([nfnl, 1, self.nnei, 1]) + ) + # nfnl x nt_i x nt_j x ng + gg = gg_s * gg_t + gg_s + else: + raise NotImplementedError + + # nfnl x ng + # res_ij = paddle.einsum("ijk,ijkm->im", env_ij, gg) + res_ij = ( + # ijk1 x ijkm -> ijkm -> im + env_ij.unsqueeze(-1) * gg + ).sum([1, 2]) + res_ij = res_ij * (1.0 / float(self.nnei) / float(self.nnei)) + # nf x nl x ng + result = res_ij.reshape([nframes, nloc, self.filter_neuron[-1]]) + return ( + result, + None, + None, + None, + sw, + ) + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return False + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False diff --git a/deepmd/pd/utils/spin.py b/deepmd/pd/utils/spin.py new file mode 100644 index 0000000000..934fb3762a --- /dev/null +++ b/deepmd/pd/utils/spin.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import paddle + + +def concat_switch_virtual( + extended_tensor, + extended_tensor_virtual, + nloc: int, +): + """ + Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. + - [:, :nloc]: original nloc real atoms. + - [:, nloc: nloc + nloc]: virtual atoms corresponding to nloc real atoms. 
+ - [:, nloc + nloc: nloc + nall]: ghost real atoms. + - [:, nloc + nall: nall + nall]: virtual atoms corresponding to ghost real atoms. + """ + nframes, nall = extended_tensor.shape[:2] + out_shape = list(extended_tensor.shape) + out_shape[1] *= 2 + extended_tensor_updated = paddle.zeros( + out_shape, + dtype=extended_tensor.dtype, + device=extended_tensor.place, + ) + extended_tensor_updated[:, :nloc] = extended_tensor[:, :nloc] + extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[:, :nloc] + extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[:, nloc:] + extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] + return extended_tensor_updated.reshape(out_shape) From c944b82c64e4872d6fd3cea0166c05b5e8318913 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 28 Nov 2024 20:50:49 +0800 Subject: [PATCH 43/58] restore decomp to paddle function --- .pre-commit-config.yaml | 52 ++++++++--------- deepmd/pd/loss/ener.py | 6 +- deepmd/pd/model/descriptor/env_mat.py | 9 +-- deepmd/pd/model/descriptor/se_atten.py | 6 +- deepmd/pd/model/model/make_model.py | 10 +--- deepmd/pd/train/training.py | 24 ++++---- deepmd/pd/utils/cache.py | 31 ---------- deepmd/pd/utils/decomp.py | 81 -------------------------- deepmd/pd/utils/exclude_mask.py | 8 +-- deepmd/pd/utils/nlist.py | 28 +++------ deepmd/pd/utils/region.py | 13 +---- 11 files changed, 57 insertions(+), 211 deletions(-) delete mode 100644 deepmd/pd/utils/cache.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7aa2012200..fb1a5ff907 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 - hooks: - - id: prettier - types_or: [markdown, yaml, css] - # workflow files cannot be 
modified by pre-commit.ci - exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + # - repo: https://github.com/pre-commit/mirrors-prettier + # rev: v4.0.0-alpha.8 + # hooks: + # - id: prettier + # types_or: [markdown, yaml, css] + # # workflow files cannot be modified by pre-commit.ci + # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - - repo: https://github.com/njzjz/mirrors-bibtex-tidy - rev: v1.13.0 - hooks: - - id: bibtex-tidy - args: - - --curly - - --numeric - - --align=13 - - --blank-lines - # disable sort: the order of keys and fields has explict meanings - #- --sort=key - - --duplicates=key,doi,citation,abstract - - --merge=combine - #- --sort-fields - #- --strip-comments - - --trailing-commas - - --encode-urls - - --remove-empty-fields - - --wrap=80 + # - repo: https://github.com/njzjz/mirrors-bibtex-tidy + # rev: v1.13.0 + # hooks: + # - id: bibtex-tidy + # args: + # - --curly + # - --numeric + # - --align=13 + # - --blank-lines + # # disable sort: the order of keys and fields has explict meanings + # #- --sort=key + # - --duplicates=key,doi,citation,abstract + # - --merge=combine + # #- --sort-fields + # #- --strip-comments + # - --trailing-commas + # - --encode-urls + # - --remove-empty-fields + # - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/deepmd/pd/loss/ener.py b/deepmd/pd/loss/ener.py index 7c5d848b45..73ad53601a 100644 --- a/deepmd/pd/loss/ener.py +++ b/deepmd/pd/loss/ener.py @@ -10,7 +10,6 @@ TaskLoss, ) from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.env import ( @@ -224,10 +223,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if self.relative_f is not None: force_label_3 = force_label.reshape([-1, 3]) - # norm_f = force_label_3.norm(axis=1, keepdim=True) + 
self.relative_f - norm_f = ( - decomp.norm(force_label_3, axis=1, keepdim=True) + self.relative_f - ) + norm_f = force_label_3.norm(axis=1, keepdim=True) + self.relative_f diff_f_3 = diff_f.reshape([-1, 3]) diff_f_3 = diff_f_3 / norm_f diff_f = diff_f_3.reshape([-1]) diff --git a/deepmd/pd/model/descriptor/env_mat.py b/deepmd/pd/model/descriptor/env_mat.py index 3a9daec1e8..9b72da0b16 100644 --- a/deepmd/pd/model/descriptor/env_mat.py +++ b/deepmd/pd/model/descriptor/env_mat.py @@ -2,9 +2,6 @@ import paddle -from deepmd.pd.utils import ( - decomp, -) from deepmd.pd.utils.preprocess import ( compute_smooth_weight, ) @@ -27,12 +24,10 @@ def _make_env_mat( nlist = paddle.where(mask, nlist, nall - 1) coord_l = coord[:, :natoms].reshape([bsz, -1, 1, 3]) index = nlist.reshape([bsz, -1]).unsqueeze(-1).expand([-1, -1, 3]) - # coord_r = paddle.take_along_axis(coord, axis=1, indices=index) - coord_r = decomp.take_along_axis(coord, axis=1, indices=index) + coord_r = paddle.take_along_axis(coord, axis=1, indices=index) coord_r = coord_r.reshape([bsz, natoms, nnei, 3]) diff = coord_r - coord_l - # length = paddle.linalg.norm(diff, axis=-1, keepdim=True) - length = decomp.norm(diff, axis=-1, keepdim=True) + length = paddle.linalg.norm(diff, axis=-1, keepdim=True) # for index 0 nloc atom length = length + (~mask.unsqueeze(-1)).astype(length.dtype) t0 = 1 / (length + protection) diff --git a/deepmd/pd/model/descriptor/se_atten.py b/deepmd/pd/model/descriptor/se_atten.py index 89407e6923..ae76d331fd 100644 --- a/deepmd/pd/model/descriptor/se_atten.py +++ b/deepmd/pd/model/descriptor/se_atten.py @@ -27,7 +27,6 @@ NetworkCollection, ) from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.env import ( @@ -481,8 +480,7 @@ def forward( # nb x (nloc x nnei) x nt index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) # nb x (nloc x nnei) x nt - # atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, index=index) - atype_tebd_nlist = 
decomp.take_along_axis(atype_tebd_ext, axis=1, indices=index) + atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, indices=index) # nb x nloc x nnei x nt atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) # beyond the cutoff sw should be 0.0 @@ -537,7 +535,7 @@ def forward( gg_t = gg_t * sw.reshape([-1, self.nnei, 1]) # nfnl x nnei x ng gg = gg_s * gg_t + gg_s - input_r = decomp.normalize( + input_r = paddle_func.normalize( rr.reshape([-1, self.nnei, 4])[:, :, 1:4], axis=-1 ) gg = self.dpa1_attention( diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index d5c5c6bd41..ce930a51d6 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -24,9 +24,6 @@ communicate_extended_output, fit_output_to_model_output, ) -from deepmd.pd.utils import ( - decomp, -) from deepmd.pd.utils.env import ( GLOBAL_PD_ENER_FLOAT_PRECISION, GLOBAL_PD_FLOAT_PRECISION, @@ -459,18 +456,17 @@ def _format_nlist( coord0 = extended_coord[:, :n_nloc, :] # nf x (nloc x nnei) x 3 index = nlist.reshape([n_nf, n_nloc * n_nnei, 1]).expand([-1, -1, 3]) - coord1 = decomp.take_along_axis(extended_coord, axis=1, indices=index) + coord1 = paddle.take_along_axis(extended_coord, axis=1, indices=index) # nf x nloc x nnei x 3 coord1 = coord1.reshape([n_nf, n_nloc, n_nnei, 3]) # nf x nloc x nnei - # rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) - rr = decomp.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) rr = paddle.where(m_real_nei, rr, float("inf")) rr, nlist_mapping = ( paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1), ) - nlist = decomp.take_along_axis(nlist, axis=2, indices=nlist_mapping) + nlist = paddle.take_along_axis(nlist, axis=2, indices=nlist_mapping) nlist = paddle.where(rr > rcut, paddle.full_like(nlist, -1), nlist) nlist = nlist[..., :nnei] else: # not extra_nlist_sort and n_nnei <= nnei: diff --git 
a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 61f24c1455..0b877ef263 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -80,6 +80,18 @@ ) +def format_training_message( + batch: int, + wall_time: float, + eta: Optional[int] = None, +): + """Format a training message.""" + msg = f"batch {batch:7d}: " f"total wall time = {wall_time:.2f} s" + if isinstance(eta, int): + msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}" + return msg + + class Trainer: def __init__( self, @@ -1213,15 +1225,3 @@ def model_change_out_bias( f"to {to_numpy_array(new_bias).reshape(-1)!s}." ) return _model - - -def format_training_message( - batch: int, - wall_time: float, - eta: Optional[int] = None, -): - """Format a training message.""" - msg = f"batch {batch:7d}: " f"total wall time = {wall_time:.2f} s" - if isinstance(eta, int): - msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}" - return msg diff --git a/deepmd/pd/utils/cache.py b/deepmd/pd/utils/cache.py deleted file mode 100644 index c40c4050b7..0000000000 --- a/deepmd/pd/utils/cache.py +++ /dev/null @@ -1,31 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import copy as copy_lib -import functools - - -def lru_cache(maxsize=16, typed=False, copy=False, deepcopy=False): - if deepcopy: - - def decorator(f): - cached_func = functools.lru_cache(maxsize, typed)(f) - - @functools.wraps(f) - def wrapper(*args, **kwargs): - return copy_lib.deepcopy(cached_func(*args, **kwargs)) - - return wrapper - - elif copy: - - def decorator(f): - cached_func = functools.lru_cache(maxsize, typed)(f) - - @functools.wraps(f) - def wrapper(*args, **kwargs): - return copy_lib.copy(cached_func(*args, **kwargs)) - - return wrapper - - else: - decorator = functools.lru_cache(maxsize, typed) - return decorator diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 42f3b9c7d1..0aa38a44f2 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py @@ -14,8 +14,6 @@ 
import paddle __all__ = [ - "norm", - "take_along_axis", "scatter_reduce", "sec", "masked_add_", @@ -23,55 +21,6 @@ ] -def norm_decomp( - x: paddle.Tensor, p: float = 2, axis: bool = -1, keepdim: bool = False -) -> paddle.Tensor: - """Forward decompsition function of norm. - - Parameters - ---------- - x : paddle.Tensor - Input - p : float, default: 2 - Order of norm - axis : bool, default: -1 - Dimensions over which to compute the vector or matrix norm - keepdim : bool, default: False - If set to True, the reduced dimensions are retained in the result as dimensions - with size one - - Returns - ------- - paddle.Tensor - A real-valued tensor, even when A is complex. - """ - return paddle.linalg.norm(x, p=p, axis=axis, keepdim=keepdim) - - -def take_along_axis_decomp( - x: paddle.Tensor, indices: paddle.Tensor, axis: int, broadcast: bool = True -) -> paddle.Tensor: - """Forward decompsition function of take_along_axis. - - Parameters - ---------- - x : paddle.Tensor - The input tensor. - indices : paddle.Tensor - Indices to take along each 1d slice of array. - axis : int - The axis to take 1d slices along. - broadcast : bool, default: True - Whether the indices broadcast. - - Returns - ------- - paddle.Tensor - Computed output. - """ - return paddle.take_along_axis(x, indices, axis, broadcast) - - def scatter_reduce_decomp( input: paddle.Tensor, axis: int, @@ -178,33 +127,6 @@ def masked_add__decomp( return x -def normalize_decomp( - x: paddle.Tensor, - p: float = 2, - axis: int = 1, - epsilon: float = 1e-12, -) -> paddle.Tensor: - """Forward decompsition function of normalize. - - Parameters - ---------- - x : paddle.Tensor - Input tensor. - p : float, optional - Order of the norm, default: 2 - axis : int, optional - Axis on which to perform normalization, default: 1 - epsilon : float, optional - Epislon value, default: 1e-12 - - Returns - ------- - paddle.Tensor - Computed output. 
- """ - return paddle.nn.functional.normalize(x, p, axis, epsilon) - - def numel(x: paddle.Tensor) -> int: if paddle.in_dynamic_mode(): return np.prod(x.shape) @@ -213,8 +135,5 @@ def numel(x: paddle.Tensor) -> int: # alias for decomposed functions for convinience -normalize = normalize_decomp masked_add_ = masked_add__decomp scatter_reduce = scatter_reduce_decomp -take_along_axis = take_along_axis_decomp -norm = norm_decomp diff --git a/deepmd/pd/utils/exclude_mask.py b/deepmd/pd/utils/exclude_mask.py index 088ac186a8..37b5bc79ed 100644 --- a/deepmd/pd/utils/exclude_mask.py +++ b/deepmd/pd/utils/exclude_mask.py @@ -3,9 +3,6 @@ import numpy as np import paddle -from deepmd.pd.utils import ( - decomp, -) from deepmd.pd.utils.utils import ( to_paddle_tensor, ) @@ -146,10 +143,7 @@ def forward( type_i = atype_ext[:, :nloc].reshape([nf, nloc]) * (self.ntypes + 1) # nf x nloc x nnei index = paddle.where(nlist == -1, nall, nlist).reshape([nf, nloc * nnei]) - # type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( - # [nf, nloc, nnei] - # ) - type_j = decomp.take_along_axis(ae, axis=1, indices=index).reshape( + type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( [nf, nloc, nnei] ) type_ij = type_i[:, :, None] + type_j diff --git a/deepmd/pd/utils/nlist.py b/deepmd/pd/utils/nlist.py index 44924ce07d..ae9db628a1 100644 --- a/deepmd/pd/utils/nlist.py +++ b/deepmd/pd/utils/nlist.py @@ -7,7 +7,6 @@ import paddle from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.region import ( @@ -118,8 +117,7 @@ def build_neighbor_list( if paddle.in_dynamic_mode(): assert list(diff.shape) == [batch_size, nloc, nall, 3] # nloc x nall - # rr = paddle.linalg.norm(diff, axis=-1) - rr = decomp.norm(diff, axis=-1) + rr = paddle.linalg.norm(diff, axis=-1) # if central atom has two zero distances, sorting sometimes can not exclude itself rr = rr - paddle.eye(nloc, nall, dtype=rr.dtype).to(device=rr.place).unsqueeze(0) rr, nlist = paddle.sort(rr, 
axis=-1), paddle.argsort(rr, axis=-1) @@ -267,8 +265,7 @@ def build_directional_neighbor_list( if paddle.in_dynamic_mode(): assert list(diff.shape) == [batch_size, nloc_cntl, nall_neig, 3] # nloc x nall - # rr = paddle.linalg.norm(diff, axis=-1) - rr = decomp.norm(diff, axis=-1) + rr = paddle.linalg.norm(diff, axis=-1) rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1) # We assume that the central and neighbor atoms are diffferent, @@ -300,12 +297,7 @@ def nlist_distinguish_types( tmp_atype = paddle.tile(atype.unsqueeze(1), [1, nloc, 1]) mask = nlist == -1 # nloc x s(nsel) - # tnlist = paddle.take_along_axis( - # tmp_atype, - # axis=2, - # indices=nlist.masked_fill(mask, 0), - # ) - tnlist = decomp.take_along_axis( + tnlist = paddle.take_along_axis( tmp_atype, axis=2, indices=nlist.masked_fill(mask, 0), @@ -322,8 +314,7 @@ def nlist_distinguish_types( paddle.argsort(pick_mask, axis=-1, descending=True, stable=True), ) # nloc x s(nsel) - # inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) - inlist = decomp.take_along_axis(nlist, axis=2, indices=imap) + inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) inlist = inlist.masked_fill(~(pick_mask.to(paddle.bool)), -1) # nloc x nsel[ii] ret_nlist.append(paddle.split(inlist, [ss, snsel - ss], axis=-1)[0]) @@ -404,17 +395,13 @@ def build_multiple_neighbor_list( .expand([-1, -1, 3]) ) # nb x nloc x nsel x 3 - # coord2 = paddle.take_along_axis(coord1, axis=1, index=index).reshape( - # [nb, nloc, nsel, 3] - # ) - coord2 = decomp.take_along_axis(coord1, axis=1, indices=index).reshape( + coord2 = paddle.take_along_axis(coord1, axis=1, indices=index).reshape( [nb, nloc, nsel, 3] ) # nb x nloc x nsel x 3 diff = coord2 - coord0[:, :, None, :] # nb x nloc x nsel - # rr = paddle.linalg.norm(diff, axis=-1) - rr = decomp.norm(diff, axis=-1) + rr = paddle.linalg.norm(diff, axis=-1) rr.masked_fill(nlist_mask, float("inf")) nlist0 = nlist ret = {} @@ -516,8 +503,7 @@ def extend_coord_with_ghosts( xyz = 
xyz.reshape([-1, 3]) # xyz = xyz.to(device=device) # ns x 3 - # shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))] - shift_idx = xyz[paddle.argsort(decomp.norm(xyz, axis=1))] + shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))] ns, _ = shift_idx.shape nall = ns * nloc # nf x ns x 3 diff --git a/deepmd/pd/utils/region.py b/deepmd/pd/utils/region.py index 21927e3619..f3e3eaa52d 100644 --- a/deepmd/pd/utils/region.py +++ b/deepmd/pd/utils/region.py @@ -1,10 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import paddle -from deepmd.pd.utils import ( - decomp, -) - def phys2inter( coord: paddle.Tensor, @@ -82,14 +78,11 @@ def to_face_distance( def b_to_face_distance(cell): volume = paddle.linalg.det(cell) c_yz = paddle.cross(cell[:, 1], cell[:, 2], axis=-1) - # _h2yz = volume / paddle.linalg.norm(c_yz, axis=-1) - _h2yz = volume / decomp.norm(c_yz, axis=-1) + _h2yz = volume / paddle.linalg.norm(c_yz, axis=-1) c_zx = paddle.cross(cell[:, 2], cell[:, 0], axis=-1) - # _h2zx = volume / paddle.linalg.norm(c_zx, axis=-1) - _h2zx = volume / decomp.norm(c_zx, axis=-1) + _h2zx = volume / paddle.linalg.norm(c_zx, axis=-1) c_xy = paddle.cross(cell[:, 0], cell[:, 1], axis=-1) - # _h2xy = volume / paddle.linalg.norm(c_xy, axis=-1) - _h2xy = volume / decomp.norm(c_xy, axis=-1) + _h2xy = volume / paddle.linalg.norm(c_xy, axis=-1) return paddle.stack([_h2yz, _h2zx, _h2xy], axis=1) From 4c925f9a7c95e7e97510ede8a7c129d0619f83a4 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 28 Nov 2024 20:51:58 +0800 Subject: [PATCH 44/58] remove redundant files --- .pre-commit-config.yaml | 52 ++++++++++++++++---------------- deepmd/pd/utils/learning_rate.py | 8 ----- deepmd/pd/utils/plugin.py | 16 ---------- source/tests/pd/requirements.txt | 6 ---- 4 files changed, 26 insertions(+), 56 deletions(-) delete mode 100644 deepmd/pd/utils/learning_rate.py delete mode 100644 deepmd/pd/utils/plugin.py delete mode 100644 source/tests/pd/requirements.txt diff 
--git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fb1a5ff907..7aa2012200 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - # - repo: https://github.com/pre-commit/mirrors-prettier - # rev: v4.0.0-alpha.8 - # hooks: - # - id: prettier - # types_or: [markdown, yaml, css] - # # workflow files cannot be modified by pre-commit.ci - # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v4.0.0-alpha.8 + hooks: + - id: prettier + types_or: [markdown, yaml, css] + # workflow files cannot be modified by pre-commit.ci + exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - # - repo: https://github.com/njzjz/mirrors-bibtex-tidy - # rev: v1.13.0 - # hooks: - # - id: bibtex-tidy - # args: - # - --curly - # - --numeric - # - --align=13 - # - --blank-lines - # # disable sort: the order of keys and fields has explict meanings - # #- --sort=key - # - --duplicates=key,doi,citation,abstract - # - --merge=combine - # #- --sort-fields - # #- --strip-comments - # - --trailing-commas - # - --encode-urls - # - --remove-empty-fields - # - --wrap=80 + - repo: https://github.com/njzjz/mirrors-bibtex-tidy + rev: v1.13.0 + hooks: + - id: bibtex-tidy + args: + - --curly + - --numeric + - --align=13 + - --blank-lines + # disable sort: the order of keys and fields has explict meanings + #- --sort=key + - --duplicates=key,doi,citation,abstract + - --merge=combine + #- --sort-fields + #- --strip-comments + - --trailing-commas + - --encode-urls + - --remove-empty-fields + - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git 
a/deepmd/pd/utils/learning_rate.py b/deepmd/pd/utils/learning_rate.py deleted file mode 100644 index 3502434bc0..0000000000 --- a/deepmd/pd/utils/learning_rate.py +++ /dev/null @@ -1,8 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -from deepmd.dpmodel.utils.learning_rate import ( - LearningRateExp, -) - -__all__ = [ - "LearningRateExp", -] diff --git a/deepmd/pd/utils/plugin.py b/deepmd/pd/utils/plugin.py deleted file mode 100644 index aa901c06e8..0000000000 --- a/deepmd/pd/utils/plugin.py +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -"""Base of plugin systems.""" - -from deepmd.utils.plugin import ( - Plugin, - PluginVariant, - VariantABCMeta, - VariantMeta, -) - -__all__ = [ - "Plugin", - "VariantMeta", - "VariantABCMeta", - "PluginVariant", -] diff --git a/source/tests/pd/requirements.txt b/source/tests/pd/requirements.txt deleted file mode 100644 index 74abad719e..0000000000 --- a/source/tests/pd/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -tensorflow>=2.14.0 -deepmd-kit>=2.2.7 -dpdata -ase -coverage -pytest From dd3191a612fb1d58e59b7b821d3f8d762e88cfd5 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 11:17:41 +0800 Subject: [PATCH 45/58] update unitest and codes --- .../pd/model/atomic_model/dp_atomic_model.py | 38 +++++- deepmd/pd/model/descriptor/dpa1.py | 21 +++- deepmd/pd/model/descriptor/se_atten.py | 114 ++++++++++++------ deepmd/pd/model/model/ener_model.py | 17 ++- deepmd/pd/model/network/network.py | 25 +++- deepmd/pd/model/task/fitting.py | 75 +++++++----- deepmd/pd/utils/exclude_mask.py | 12 +- source/tests/pd/common.py | 8 ++ source/tests/pd/model/test_descriptor.py | 3 +- source/tests/pd/model/test_env_mat.py | 6 +- source/tests/pd/model/test_forward_lower.py | 3 +- source/tests/pd/model/test_null_input.py | 8 ++ source/tests/pd/model/test_permutation.py | 23 ++-- source/tests/pd/model/test_rot.py | 1 - source/tests/pd/model/test_smooth.py | 36 ++++++ 
source/tests/pd/model/test_trans.py | 1 - source/tests/pd/test_decomp.py | 61 ---------- source/tests/pd/test_finetune.py | 1 - source/tests/pd/test_training.py | 27 ++++- 19 files changed, 298 insertions(+), 182 deletions(-) diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py index 25a0f89d77..1089b93a68 100644 --- a/deepmd/pd/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import copy import functools import logging from typing import ( @@ -52,7 +51,7 @@ def __init__( fitting, type_map: list[str], **kwargs, - ): + ) -> None: super().__init__(type_map, **kwargs) ntypes = len(type_map) self.type_map = type_map @@ -201,7 +200,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data) -> "DPAtomicModel": - data = copy.deepcopy(data) + data = data.copy() check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) @@ -212,6 +211,37 @@ def deserialize(cls, data) -> "DPAtomicModel": obj = super().deserialize(data) return obj + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call descriptor enable_compression(). 
+ + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.descriptor.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_atomic( self, extended_coord, @@ -278,7 +308,7 @@ def compute_or_load_stat( self, sampled_func, stat_file_path: Optional[DPPath] = None, - ): + ) -> None: """ Compute or load the statistics parameters of the model, such as mean and standard deviation of descriptors or the energy bias of the fitting net. diff --git a/deepmd/pd/model/descriptor/dpa1.py b/deepmd/pd/model/descriptor/dpa1.py index 0d8b9dc9b1..f3f1ea26d6 100644 --- a/deepmd/pd/model/descriptor/dpa1.py +++ b/deepmd/pd/model/descriptor/dpa1.py @@ -22,6 +22,7 @@ env, ) from deepmd.pd.utils.env import ( + PRECISION_DICT, RESERVED_PRECISON_DICT, ) from deepmd.pd.utils.update_sel import ( @@ -245,7 +246,7 @@ def __init__( # not implemented spin=None, type: Optional[str] = None, - ): + ) -> None: super().__init__() # Ensure compatibility with the deprecated stripped_type_embedding option. if stripped_type_embedding is not None: @@ -305,6 +306,7 @@ def __init__( use_tebd_bias=use_tebd_bias, type_map=type_map, ) + self.prec = PRECISION_DICT[precision] self.tebd_dim = tebd_dim self.concat_output_tebd = concat_output_tebd self.trainable = trainable @@ -370,7 +372,7 @@ def get_env_protection(self) -> float: """Returns the protection of building environment matrix.""" return self.se_atten.get_env_protection() - def share_params(self, base_class, shared_level, resume=False): + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. 
If not start from checkpoint (resume is False), @@ -624,22 +626,35 @@ def forward( The smooth switch function. shape: nf x nloc x nnei """ + # cast the input to internal precsion + extended_coord = extended_coord.to(dtype=self.prec) del mapping nframes, nloc, nnei = nlist.shape nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 g1_ext = self.type_embedding(extended_atype) g1_inp = g1_ext[:, :nloc, :] + if self.tebd_input_mode in ["strip"]: + type_embedding = self.type_embedding.get_full_embedding(g1_ext.place) + else: + type_embedding = None g1, g2, h2, rot_mat, sw = self.se_atten( nlist, extended_coord, extended_atype, g1_ext, mapping=None, + type_embedding=type_embedding, ) if self.concat_output_tebd: g1 = paddle.concat([g1, g1_inp], axis=-1) - return g1, rot_mat, g2, h2, sw + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + g2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION) if g2 is not None else None, + h2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) @classmethod def update_sel( diff --git a/deepmd/pd/model/descriptor/se_atten.py b/deepmd/pd/model/descriptor/se_atten.py index ae76d331fd..6829e4ea8a 100644 --- a/deepmd/pd/model/descriptor/se_atten.py +++ b/deepmd/pd/model/descriptor/se_atten.py @@ -208,12 +208,8 @@ def __init__( ) wanted_shape = (self.ntypes, self.nnei, 4) - mean = paddle.zeros(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( - device=env.DEVICE - ) - stddev = paddle.ones(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( - device=env.DEVICE - ) + mean = paddle.zeros(wanted_shape, dtype=self.prec).to(device=env.DEVICE) + stddev = paddle.ones(wanted_shape, dtype=self.prec).to(device=env.DEVICE) self.register_buffer("mean", mean) self.register_buffer("stddev", stddev) self.tebd_dim_input = self.tebd_dim if self.type_one_side else self.tebd_dim * 2 @@ -296,7 +292,7 @@ def get_dim_emb(self) -> int: """Returns the output dimension of 
embedding.""" return self.filter_neuron[-1] - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if key in ("avg", "data_avg", "davg"): self.mean = value elif key in ("std", "data_std", "dstd"): @@ -347,7 +343,7 @@ def compute_input_stats( self, merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, - ): + ) -> None: """ Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. @@ -393,7 +389,7 @@ def get_stats(self) -> dict[str, StatItem]: def reinit_exclude( self, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: self.exclude_types = exclude_types self.is_sorted = len(self.exclude_types) == 0 self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) @@ -416,6 +412,7 @@ def forward( extended_atype: paddle.Tensor, extended_atype_embd: Optional[paddle.Tensor] = None, mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, ): """Compute the descriptor. @@ -431,6 +428,9 @@ def forward( The extended type embedding of atoms. shape: nf x nall mapping The index mapping, not required by this descriptor. + type_embedding + Full type embeddings. shape: (ntypes+1) x nt + Required for stripped type embeddings. 
Returns ------- @@ -471,23 +471,12 @@ def forward( nlist_mask = nlist != -1 nlist = paddle.where(nlist == -1, paddle.zeros_like(nlist), nlist) sw = paddle.squeeze(sw, -1) - # nf x nloc x nt -> nf x nloc x nnei x nt - atype_tebd = extended_atype_embd[:, :nloc, :] - atype_tebd_nnei = atype_tebd.unsqueeze(2).expand([-1, -1, self.nnei, -1]) # nf x nall x nt nt = extended_atype_embd.shape[-1] - atype_tebd_ext = extended_atype_embd - # nb x (nloc x nnei) x nt - index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) - # nb x (nloc x nnei) x nt - atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, indices=index) - # nb x nloc x nnei x nt - atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) # beyond the cutoff sw should be 0.0 sw = sw.masked_fill(~nlist_mask, 0.0) # (nb x nloc) x nnei exclude_mask = exclude_mask.reshape([nb * nloc, nnei]) - # nfnl x nnei x 4 dmatrix = dmatrix.reshape([-1, self.nnei, 4]) nfnl = dmatrix.shape[0] @@ -495,9 +484,25 @@ def forward( rr = dmatrix rr = rr * exclude_mask[:, :, None].astype(rr.dtype) ss = rr[:, :, :1] - nlist_tebd = atype_tebd_nlist.reshape([nfnl, nnei, self.tebd_dim]) - atype_tebd = atype_tebd_nnei.reshape([nfnl, nnei, self.tebd_dim]) if self.tebd_input_mode in ["concat"]: + atype_tebd_ext = extended_atype_embd + # nb x (nloc x nnei) x nt + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) + # nb x (nloc x nnei) x nt + atype_tebd_nlist = paddle.take_along_axis( + atype_tebd_ext, axis=1, indices=index + ) # j + # nb x nloc x nnei x nt + atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) + + # nf x nloc x nt -> nf x nloc x nnei x nt + atype_tebd = extended_atype_embd[:, :nloc, :] + atype_tebd_nnei = atype_tebd.unsqueeze(2).expand( + [-1, -1, self.nnei, -1] + ) # i + + nlist_tebd = atype_tebd_nlist.reshape([nfnl, nnei, self.tebd_dim]) + atype_tebd = atype_tebd_nnei.reshape([nfnl, nnei, self.tebd_dim]) if not self.type_one_side: # nfnl x nnei x (1 
+ tebd_dim * 2) ss = paddle.concat([ss, nlist_tebd, atype_tebd], axis=2) @@ -515,24 +520,55 @@ def forward( # nfnl x 4 x ng xyz_scatter = paddle.matmul(rr.transpose([0, 2, 1]), gg) elif self.tebd_input_mode in ["strip"]: + assert self.filter_layers_strip is not None + assert type_embedding is not None + ng = self.filter_neuron[-1] + ntypes_with_padding = type_embedding.shape[0] + # nf x (nl x nnei) + nlist_index = nlist.reshape([nb, nloc * nnei]) + # nf x (nl x nnei) + nei_type = paddle.take_along_axis( + extended_atype, indices=nlist_index, axis=1 + ) + # (nf x nl x nnei) x ng + nei_type_index = nei_type.reshape([-1, 1]).expand([-1, ng]).to(paddle.int64) + if self.type_one_side: + tt_full = self.filter_layers_strip.networks[0](type_embedding) + # (nf x nl x nnei) x ng + gg_t = paddle.take_along_axis(tt_full, indices=nei_type_index, axis=0) + else: + idx_i = paddle.tile( + atype.reshape([-1, 1]) * ntypes_with_padding, [1, nnei] + ).reshape([-1]) + idx_j = nei_type.reshape([-1]) + # (nf x nl x nnei) x ng + idx = (idx_i + idx_j).reshape([-1, 1]).expand([-1, ng]).to(paddle.int64) + # (ntypes) * ntypes * nt + type_embedding_nei = paddle.tile( + type_embedding.reshape([1, ntypes_with_padding, nt]), + [ntypes_with_padding, 1, 1], + ) + # ntypes * (ntypes) * nt + type_embedding_center = paddle.tile( + type_embedding.reshape([ntypes_with_padding, 1, nt]), + [1, ntypes_with_padding, 1], + ) + # (ntypes * ntypes) * (nt+nt) + two_side_type_embedding = paddle.concat( + [type_embedding_nei, type_embedding_center], -1 + ).reshape([-1, nt * 2]) + tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) + # (nf x nl x nnei) x ng + gg_t = paddle.take_along_axis(tt_full, axis=0, indices=idx) + # (nf x nl) x nnei x ng + gg_t = gg_t.reshape([nfnl, nnei, ng]) + if self.smooth: + gg_t = gg_t * sw.reshape([-1, self.nnei, 1]) if self.compress: raise NotImplementedError("Compression is not implemented yet.") else: # nfnl x nnei x ng gg_s = self.filter_layers.networks[0](ss) 
- assert self.filter_layers_strip is not None - if not self.type_one_side: - # nfnl x nnei x (tebd_dim * 2) - tt = paddle.concat( - [nlist_tebd, atype_tebd], axis=2 - ) # dynamic, index - else: - # nfnl x nnei x tebd_dim - tt = nlist_tebd - # nfnl x nnei x ng - gg_t = self.filter_layers_strip.networks[0](tt) - if self.smooth: - gg_t = gg_t * sw.reshape([-1, self.nnei, 1]) # nfnl x nnei x ng gg = gg_s * gg_t + gg_s input_r = paddle_func.normalize( @@ -590,7 +626,7 @@ def __init__( smooth: bool = True, precision: str = DEFAULT_PRECISION, seed: Optional[Union[int, list[int]]] = None, - ): + ) -> None: """Construct a neighbor-wise attention net.""" super().__init__() self.layer_num = layer_num @@ -660,7 +696,7 @@ def __getitem__(self, key): else: raise TypeError(key) - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if not isinstance(key, int): raise TypeError(key) if isinstance(value, self.network_type): @@ -732,7 +768,7 @@ def __init__( ln_eps: float = 1e-5, precision: str = DEFAULT_PRECISION, seed: Optional[Union[int, list[int]]] = None, - ): + ) -> None: """Construct a neighbor-wise attention layer.""" super().__init__() self.nnei = nnei @@ -839,7 +875,7 @@ def __init__( smooth: bool = True, precision: str = DEFAULT_PRECISION, seed: Optional[Union[int, list[int]]] = None, - ): + ) -> None: """Construct a multi-head neighbor-wise attention net.""" super().__init__() assert hidden_dim % num_heads == 0, "hidden_dim must be divisible by num_heads" diff --git a/deepmd/pd/model/model/ener_model.py b/deepmd/pd/model/model/ener_model.py index 3f3db4a527..a5b1b9d4b3 100644 --- a/deepmd/pd/model/model/ener_model.py +++ b/deepmd/pd/model/model/ener_model.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from copy import ( - deepcopy, -) from typing import ( Optional, ) @@ -33,26 +30,26 @@ def __init__( self, *args, **kwargs, - ): + ) -> None: DPModelCommon.__init__(self) DPEnergyModel_.__init__(self, *args, **kwargs) def 
translated_output_def(self): out_def_data = self.model_output_def().get_data() output_def = { - "atom_energy": deepcopy(out_def_data["energy"]), - "energy": deepcopy(out_def_data["energy_redu"]), + "atom_energy": out_def_data["energy"], + "energy": out_def_data["energy_redu"], } if self.do_grad_r("energy"): - output_def["force"] = deepcopy(out_def_data["energy_derv_r"]) + output_def["force"] = out_def_data["energy_derv_r"] output_def["force"].squeeze(-2) if self.do_grad_c("energy"): - output_def["virial"] = deepcopy(out_def_data["energy_derv_c_redu"]) + output_def["virial"] = out_def_data["energy_derv_c_redu"] output_def["virial"].squeeze(-2) - output_def["atom_virial"] = deepcopy(out_def_data["energy_derv_c"]) + output_def["atom_virial"] = out_def_data["energy_derv_c"] output_def["atom_virial"].squeeze(-3) if "mask" in out_def_data: - output_def["mask"] = deepcopy(out_def_data["mask"]) + output_def["mask"] = out_def_data["mask"] return output_def def forward( diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index f118c234ab..a95dd45a44 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -45,7 +45,7 @@ def __init__( use_econf_tebd=False, use_tebd_bias: bool = False, type_map=None, - ): + ) -> None: """Construct a type embedding net.""" super().__init__() self.type_nums = type_nums @@ -80,11 +80,28 @@ def forward(self, atype): """ return self.embedding(atype.place)[atype] - def share_params(self, base_class, shared_level, resume=False): + def get_full_embedding(self, device: str | paddle.base.libpaddle.Place): + """ + Get the type embeddings of all types. + + Parameters + ---------- + device : torch.device + The device on which to perform the computation. + + Returns + ------- + type_embedding : torch.Tensor + The full type embeddings of all types. The last index corresponds to the zero padding. 
+ Shape: (ntypes + 1) x tebd_dim + """ + return self.embedding(device) + + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -148,7 +165,7 @@ def __init__( use_econf_tebd: bool = False, use_tebd_bias: bool = False, type_map: Optional[list[str]] = None, - ): + ) -> None: """Construct a type embedding net.""" super().__init__() self.ntypes = ntypes diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index 375cf834cc..1eed95645b 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import copy import logging from abc import ( abstractmethod, @@ -55,7 +54,7 @@ def __new__(cls, *args, **kwargs): return BaseFitting.__new__(BaseFitting, *args, **kwargs) return super().__new__(cls) - def share_params(self, base_class, shared_level, resume=False): + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), @@ -65,14 +64,7 @@ def share_params(self, base_class, shared_level, resume=False): self.__class__ == base_class.__class__ ), "Only fitting nets of the same type can share params!" if shared_level == 0: - # link buffers - if hasattr(self, "bias_atom_e"): - self.bias_atom_e = base_class.bias_atom_e - # the following will successfully link all the params except buffers, which need manually link. 
- for item in self._sub_layers: - self._sub_layers[item] = base_class._sub_layers[item] - elif shared_level == 1: - # only not share the bias_atom_e + # only not share the bias_atom_e and the case_embd # the following will successfully link all the params except buffers, which need manually link. for item in self._sub_layers: self._sub_layers[item] = base_class._sub_layers[item] @@ -104,7 +96,6 @@ class GeneralFitting(Fitting): numb_aparam : int Number of atomic parameters. dim_case_embd : int - (Not supported yet) Dimension of case specific embedding. activation_function : str Activation function. @@ -155,7 +146,7 @@ def __init__( type_map: Optional[list[str]] = None, use_aparam_as_mask: bool = False, **kwargs, - ): + ) -> None: super().__init__() self.var_name = var_name self.ntypes = ntypes @@ -166,9 +157,6 @@ def __init__( self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam self.dim_case_embd = dim_case_embd - if dim_case_embd > 0: - raise ValueError("dim_case_embd is not supported yet in PaddlePaddle.") - self.case_embd = None self.activation_function = activation_function self.precision = precision self.prec = PRECISION_DICT[self.precision] @@ -189,7 +177,9 @@ def __init__( # init constants if bias_atom_e is None: bias_atom_e = np.zeros([self.ntypes, net_dim_out], dtype=np.float64) - bias_atom_e = paddle.to_tensor(bias_atom_e, dtype=self.prec).to(device=device) + bias_atom_e = paddle.to_tensor( + bias_atom_e, dtype=env.GLOBAL_PD_FLOAT_PRECISION, place=device + ) bias_atom_e = bias_atom_e.reshape([self.ntypes, net_dim_out]) if not self.mixed_types: assert self.ntypes == bias_atom_e.shape[0], "Element count mismatches!" 
@@ -218,10 +208,20 @@ def __init__( else: self.aparam_avg, self.aparam_inv_std = None, None + if self.dim_case_embd > 0: + self.register_buffer( + "case_embd", + paddle.zeros(self.dim_case_embd, dtype=self.prec, place=device), + # torch.eye(self.dim_case_embd, dtype=self.prec, device=device)[0], + ) + else: + self.case_embd = None + in_dim = ( self.dim_descrpt + self.numb_fparam + (0 if self.use_aparam_as_mask else self.numb_aparam) + + self.dim_case_embd ) self.filter_layers = NetworkCollection( @@ -249,7 +249,7 @@ def __init__( def reinit_exclude( self, exclude_types: list[int] = [], - ): + ) -> None: self.exclude_types = exclude_types self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) @@ -299,7 +299,7 @@ def serialize(self) -> dict: "exclude_types": self.exclude_types, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), - "case_embd": None, + "case_embd": to_numpy_array(self.case_embd), "fparam_avg": to_numpy_array(self.fparam_avg), "fparam_inv_std": to_numpy_array(self.fparam_inv_std), "aparam_avg": to_numpy_array(self.aparam_avg), @@ -321,7 +321,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": - data = copy.deepcopy(data) + data = data.copy() variables = data.pop("@variables") nets = data.pop("nets") obj = cls(**data) @@ -364,9 +364,11 @@ def set_case_embd(self, case_idx: int): Set the case embedding of this fitting net by the given case_idx, typically concatenated with the output of the descriptor and fed into the fitting net. 
""" - raise NotImplementedError("set_case_embd is not supported yet in PaddlePaddle.") + self.case_embd = paddle.eye(self.dim_case_embd, dtype=self.prec).to(device)[ + case_idx + ] - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if key in ["bias_atom_e"]: value = value.reshape([self.ntypes, self._net_out_dim()]) self.bias_atom_e = value @@ -424,7 +426,11 @@ def _forward_common( fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, ): - xx = descriptor + # cast the input to internal precsion + xx = descriptor.to(self.prec) + fparam = fparam.to(self.prec) if fparam is not None else None + aparam = aparam.to(self.prec) if aparam is not None else None + if self.remove_vaccum_contribution is not None: # TODO: compute the input for vaccm when remove_vaccum_contribution is set # Ideally, the input for vacuum should be computed; @@ -492,15 +498,30 @@ def _forward_common( axis=-1, ) + if self.dim_case_embd > 0: + assert self.case_embd is not None + case_embd = paddle.tile(self.case_embd.reshape([1, 1, -1]), [nf, nloc, 1]) + xx = paddle.concat( + [xx, case_embd], + axis=-1, + ) + if xx_zeros is not None: + xx_zeros = paddle.concat( + [xx_zeros, case_embd], + axis=-1, + ) + outs = paddle.zeros( (nf, nloc, net_dim_out), dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ).to(device=descriptor.place) # jit assertion + ).to(device=descriptor.place) if self.mixed_types: atom_property = self.filter_layers.networks[0](xx) + self.bias_atom_e[atype] if xx_zeros is not None: atom_property -= self.filter_layers.networks[0](xx_zeros) - outs = outs + atom_property # Shape is [nframes, natoms[0], net_dim_out] + outs = ( + outs + atom_property + self.bias_atom_e[atype].to(self.prec) + ) # Shape is [nframes, natoms[0], net_dim_out] else: for type_i, ll in enumerate(self.filter_layers.networks): mask = (atype == type_i).unsqueeze(-1) @@ -516,12 +537,12 @@ def _forward_common( ): atom_property -= ll(xx_zeros) atom_property = atom_property + 
self.bias_atom_e[type_i] - atom_property = atom_property * mask.astype(atom_property.dtype) + atom_property = paddle.where(mask, atom_property, 0.0) outs = ( outs + atom_property ) # Shape is [nframes, natoms[0], net_dim_out] # nf x nloc - mask = self.emask(atype) + mask = self.emask(atype).to("bool") # nf x nloc x nod - outs = outs * mask[:, :, None].astype(outs.dtype) + outs = paddle.where(mask[:, :, None], outs, 0.0) return {self.var_name: outs.astype(env.GLOBAL_PD_FLOAT_PRECISION)} diff --git a/deepmd/pd/utils/exclude_mask.py b/deepmd/pd/utils/exclude_mask.py index 37b5bc79ed..29c9cc3501 100644 --- a/deepmd/pd/utils/exclude_mask.py +++ b/deepmd/pd/utils/exclude_mask.py @@ -15,7 +15,7 @@ def __init__( self, ntypes: int, exclude_types: list[int] = [], - ): + ) -> None: super().__init__() self.reinit(ntypes, exclude_types) @@ -23,7 +23,7 @@ def reinit( self, ntypes: int, exclude_types: list[int] = [], - ): + ) -> None: self.ntypes = ntypes self.exclude_types = exclude_types self.type_mask = np.array( @@ -68,7 +68,7 @@ def __init__( self, ntypes: int, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: super().__init__() self.reinit(ntypes, exclude_types) @@ -76,7 +76,7 @@ def reinit( self, ntypes: int, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: self.ntypes = ntypes self._exclude_types: set[tuple[int, int]] = set() for tt in exclude_types: @@ -134,9 +134,7 @@ def forward( [ atype_ext, self.ntypes - * paddle.ones([nf, 1], dtype=atype_ext.dtype).to( - device=atype_ext.place - ), + * paddle.ones([nf, 1], dtype=atype_ext.dtype).to(atype_ext.place), ], axis=-1, ) diff --git a/source/tests/pd/common.py b/source/tests/pd/common.py index 59a9672330..d73544c5f1 100644 --- a/source/tests/pd/common.py +++ b/source/tests/pd/common.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import pathlib from typing import ( Optional, Union, @@ -7,6 +8,7 @@ import numpy as np import paddle +from deepmd.common import j_loader as dp_j_loader from 
deepmd.main import ( main, ) @@ -15,6 +17,12 @@ GLOBAL_PD_FLOAT_PRECISION, ) +tests_path = pathlib.Path(__file__).parent.absolute() + + +def j_loader(filename): + return dp_j_loader(tests_path / filename) + def run_dp(cmd: str) -> int: """Run DP directly from the entry point instead of the subprocess. diff --git a/source/tests/pd/model/test_descriptor.py b/source/tests/pd/model/test_descriptor.py index 10f2fd271b..dc78856851 100644 --- a/source/tests/pd/model/test_descriptor.py +++ b/source/tests/pd/model/test_descriptor.py @@ -17,7 +17,6 @@ prod_env_mat, ) from deepmd.pd.utils import ( - decomp, dp_random, env, ) @@ -179,7 +178,7 @@ def test_consistency(self): my_nlist = nlist.reshape([bsz, -1]).cpu() mask = my_nlist == -1 my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) - my_nlist = decomp.take_along_axis(mapping, axis=-1, indices=my_nlist) + my_nlist = paddle.take_along_axis(mapping, axis=-1, indices=my_nlist) my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) - mask.astype( my_nlist.dtype ) diff --git a/source/tests/pd/model/test_env_mat.py b/source/tests/pd/model/test_env_mat.py index 7cbc698264..bbdb7c75a3 100644 --- a/source/tests/pd/model/test_env_mat.py +++ b/source/tests/pd/model/test_env_mat.py @@ -22,7 +22,7 @@ class TestCaseSingleFrameWithNlist: - def setUp(self): + def setUp(self) -> None: # nloc == 3, nall == 4 self.nloc = 3 self.nall = 4 @@ -155,12 +155,12 @@ def setUp(self): # to be merged with the tf test case class TestEnvMat(unittest.TestCase, TestCaseSingleFrameWithNlist): - def setUp(self): + def setUp(self) -> None: TestCaseSingleFrameWithNlist.setUp(self) def test_consistency( self, - ): + ) -> None: rng = np.random.default_rng(GLOBAL_SEED) nf, nloc, nnei = self.nlist.shape davg = rng.normal(size=(self.nt, nnei, 4)) diff --git a/source/tests/pd/model/test_forward_lower.py b/source/tests/pd/model/test_forward_lower.py index ac8d0f54fc..db6497b605 100644 --- a/source/tests/pd/model/test_forward_lower.py +++ 
b/source/tests/pd/model/test_forward_lower.py @@ -96,7 +96,7 @@ def test( mixed_types=self.model.mixed_types(), box=cell.unsqueeze(0), ) - extended_spin = decomp.take_along_axis( + extended_spin = paddle.take_along_axis( spin.unsqueeze(0), indices=mapping.unsqueeze(-1).tile((1, 1, 3)), axis=1 ) input_dict = { @@ -146,7 +146,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, ForwardLowerTest): def setUp(self): self.prec = 1e-10 diff --git a/source/tests/pd/model/test_null_input.py b/source/tests/pd/model/test_null_input.py index 9bf0860265..5d67491943 100644 --- a/source/tests/pd/model/test_null_input.py +++ b/source/tests/pd/model/test_null_input.py @@ -22,6 +22,7 @@ eval_model, ) from .test_permutation import ( + model_dpa1, model_se_e2_a, ) @@ -92,3 +93,10 @@ def setUp(self): model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) + + +class TestEnergyModelDPA1(unittest.TestCase, NullTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py index 8482ca7ffe..4543348d3b 100644 --- a/source/tests/pd/model/test_permutation.py +++ b/source/tests/pd/model/test_permutation.py @@ -3,6 +3,7 @@ import os import unittest +import numpy as np import paddle from deepmd.pd.model.model import ( @@ -22,7 +23,6 @@ CUR_DIR = os.path.dirname(__file__) dtype = paddle.float64 -import numpy as np model_se_e2_a = { "type_map": ["O", "H", "B"], @@ -344,7 +344,7 @@ class PermutationTest: def test( self, - ): + ) -> None: natoms = 5 generator = paddle.seed(GLOBAL_SEED) cell = paddle.rand([3, 3], dtype=dtype) @@ -395,7 +395,7 @@ def test( class TestEnergyModelSeA(unittest.TestCase, PermutationTest): - def setUp(self): + def 
setUp(self) -> None: model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -403,15 +403,14 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestDOSModelSeA(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dos) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -419,7 +418,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -427,7 +426,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestForceModelDPA2(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) model_params["fitting_net"]["type"] = "direct_force_ener" self.type_split = True @@ -437,7 +436,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelHybrid(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -445,7 +444,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestForceModelHybrid(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) model_params["fitting_net"]["type"] = "direct_force_ener" self.type_split = True @@ -455,7 +454,7 @@ def 
setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBL(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_zbl) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -463,7 +462,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinSeA(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_spin) self.type_split = False self.test_spin = True diff --git a/source/tests/pd/model/test_rot.py b/source/tests/pd/model/test_rot.py index 4d59117560..85c90dc60f 100644 --- a/source/tests/pd/model/test_rot.py +++ b/source/tests/pd/model/test_rot.py @@ -169,7 +169,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, RotTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) diff --git a/source/tests/pd/model/test_smooth.py b/source/tests/pd/model/test_smooth.py index 7f77a6f188..cc50043ad8 100644 --- a/source/tests/pd/model/test_smooth.py +++ b/source/tests/pd/model/test_smooth.py @@ -19,6 +19,7 @@ eval_model, ) from .test_permutation import ( # model_dpau, + model_dpa1, model_se_e2_a, ) @@ -153,6 +154,41 @@ def setUp(self): self.epsilon, self.aprec = None, None +class TestEnergyModelDPA1(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + +class TestEnergyModelDPA1Excl1(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + model_params["pair_exclude_types"] = [[0, 1]] + self.type_split = True + self.model = 
get_model(model_params).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + +class TestEnergyModelDPA1Excl12(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + model_params["pair_exclude_types"] = [[0, 1], [0, 2]] + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + # class TestEnergyFoo(unittest.TestCase): # def test(self): # model_params = model_dpau diff --git a/source/tests/pd/model/test_trans.py b/source/tests/pd/model/test_trans.py index f69d2f5b83..3fae49d598 100644 --- a/source/tests/pd/model/test_trans.py +++ b/source/tests/pd/model/test_trans.py @@ -103,7 +103,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, TransTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) diff --git a/source/tests/pd/test_decomp.py b/source/tests/pd/test_decomp.py index d8439ad994..c554083bda 100644 --- a/source/tests/pd/test_decomp.py +++ b/source/tests/pd/test_decomp.py @@ -17,50 +17,6 @@ class TestDecomp(unittest.TestCase): def setUp(self): paddle.seed(GLOBAL_SEED) - def test_softmax_decomp(self): - raw_api = paddle.nn.functional.softmax - decomp_api = decomp.softmax - - raw_input = paddle.randn([100, 100], "float32") - raw_output = raw_api(raw_input) - decomp_output = decomp_api(raw_input) - - np.testing.assert_allclose( - raw_output.numpy(), - decomp_output.numpy(), - 1e-6, - 1e-8, - ) - - def test_norm_decomp(self): - raw_api = paddle.linalg.norm - decomp_api = decomp.norm - - raw_input = paddle.randn([100, 100], "float32") - raw_output = raw_api(raw_input, p=2, axis=-1) - decomp_output = decomp_api(raw_input, p=2, axis=-1) - - 
np.testing.assert_allclose( - raw_output.numpy(), - decomp_output.numpy(), - 1e-5, - 1e-8, - ) - - def test_take_along_axis_decomp(self): - raw_api = paddle.take_along_axis - decomp_api = decomp.take_along_axis - - raw_input = paddle.randn([100, 100], "float32") - raw_indices = paddle.randint(0, 100, [100, 2]) - raw_output = raw_api(raw_input, raw_indices, axis=-1) - decomp_output = decomp_api(raw_input, raw_indices, axis=-1) - - np.testing.assert_equal( - raw_output.numpy(), - decomp_output.numpy(), - ) - def test_scatter_reduce_decomp(self): raw_api = paddle.put_along_axis decomp_api = decomp.scatter_reduce @@ -112,20 +68,3 @@ def test_masked_add_(self): raw_output.numpy(), raw_input.numpy(), # inplace ) - - def test_normalize_decomp(self): - raw_api = paddle.nn.functional.normalize - decomp_api = decomp.normalize_decomp - - raw_input = paddle.randn([100, 100], "float32") - axis = -1 - - raw_output = raw_api(raw_input, p=2, axis=axis) - decomp_output = decomp_api(raw_input, p=2, axis=axis) - - np.testing.assert_allclose( - raw_output.numpy(), - decomp_output.numpy(), # inplace - 1e-5, - 1e-8, - ) diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py index 2c6cca83aa..f82f7a8cd0 100644 --- a/source/tests/pd/test_finetune.py +++ b/source/tests/pd/test_finetune.py @@ -341,7 +341,6 @@ def setUp(self): self.testkey = "dos" -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): def setUp(self): input_json = str(Path(__file__).parent / "water/se_atten.json") diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py index d4e7309a65..ffdce9bbb2 100644 --- a/source/tests/pd/test_training.py +++ b/source/tests/pd/test_training.py @@ -20,12 +20,13 @@ ) from .model.test_permutation import ( + model_dpa1, model_se_e2_a, ) class DPTrainTest: - def test_dp_train(self): + def test_dp_train(self) -> None: # test training from scratch trainer = get_trainer(deepcopy(self.config)) 
trainer.run() @@ -95,7 +96,7 @@ def test_dp_train(self): trainer_finetune_empty.run() trainer_finetune_random.run() - def test_trainable(self): + def test_trainable(self) -> None: fix_params = deepcopy(self.config) fix_params["model"]["descriptor"]["trainable"] = False fix_params["model"]["fitting_net"]["trainable"] = False @@ -124,7 +125,7 @@ def test_trainable(self): model_dict_after_training[key].numpy(), ) - def tearDown(self): + def tearDown(self) -> None: for f in os.listdir("."): if f.startswith("model") and f.endswith(".pd"): os.remove(f) @@ -135,7 +136,7 @@ def tearDown(self): class TestEnergyModelSeA(unittest.TestCase, DPTrainTest): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -153,7 +154,7 @@ def tearDown(self) -> None: class TestFparam(unittest.TestCase, DPTrainTest): """Test if `fparam` can be loaded correctly.""" - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -172,5 +173,21 @@ def tearDown(self) -> None: DPTrainTest.tearDown(self) +class TestEnergyModelDPA1(unittest.TestCase, DPTrainTest): + def setUp(self) -> None: + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() From 7df0e2f54a617f12dfd5e9efbb183e830dd88726 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 11:52:11 
+0800 Subject: [PATCH 46/58] fix --- deepmd/pd/model/network/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index a95dd45a44..a135bcbb9f 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -80,7 +80,7 @@ def forward(self, atype): """ return self.embedding(atype.place)[atype] - def get_full_embedding(self, device: str | paddle.base.libpaddle.Place): + def get_full_embedding(self, device: str): """ Get the type embeddings of all types. From ac479edfbd9badef4cbfd564fa25dbc615bef6e4 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 17:25:42 +0800 Subject: [PATCH 47/58] update code --- .pre-commit-config.yaml | 52 +- deepmd/pd/model/descriptor/se_a.py | 147 +++-- deepmd/pd/model/descriptor/se_atten.py | 38 +- deepmd/pd/model/model/make_model.py | 9 +- deepmd/pd/utils/env.py | 6 +- source/tests/pd/model/models/dpa1.json | 36 ++ source/tests/pd/model/models/dpa1.pd | Bin 0 -> 11329 bytes source/tests/pd/model/models/dpa2_tebd.pd | Bin 0 -> 537 bytes .../pd/model/test_atomic_model_atomic_stat.py | 431 +++++++++++++++ .../pd/model/test_atomic_model_global_stat.py | 510 ++++++++++++++++++ source/tests/pd/model/test_descriptor_dpa1.py | 388 +++++++++++++ source/tests/pd/model/test_dpa1.py | 164 ++++++ .../pd/model/test_permutation_denoise.py | 109 ++++ source/tests/pd/model/test_rot_denoise.py | 124 +++++ source/tests/pd/model/test_saveload_dpa1.py | 144 +++++ source/tests/pd/model/test_trans_denoise.py | 95 ++++ source/tests/pd/test_training.py | 6 + 17 files changed, 2181 insertions(+), 78 deletions(-) create mode 100644 source/tests/pd/model/models/dpa1.json create mode 100644 source/tests/pd/model/models/dpa1.pd create mode 100644 source/tests/pd/model/models/dpa2_tebd.pd create mode 100644 source/tests/pd/model/test_atomic_model_atomic_stat.py create mode 100644 
source/tests/pd/model/test_atomic_model_global_stat.py create mode 100644 source/tests/pd/model/test_descriptor_dpa1.py create mode 100644 source/tests/pd/model/test_dpa1.py create mode 100644 source/tests/pd/model/test_permutation_denoise.py create mode 100644 source/tests/pd/model/test_rot_denoise.py create mode 100644 source/tests/pd/model/test_saveload_dpa1.py create mode 100644 source/tests/pd/model/test_trans_denoise.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7aa2012200..fb1a5ff907 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,13 +65,13 @@ repos: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) # markdown, yaml, CSS, javascript - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v4.0.0-alpha.8 - hooks: - - id: prettier - types_or: [markdown, yaml, css] - # workflow files cannot be modified by pre-commit.ci - exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) + # - repo: https://github.com/pre-commit/mirrors-prettier + # rev: v4.0.0-alpha.8 + # hooks: + # - id: prettier + # types_or: [markdown, yaml, css] + # # workflow files cannot be modified by pre-commit.ci + # exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt rev: v3.10.0-1 @@ -83,25 +83,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint - - repo: https://github.com/njzjz/mirrors-bibtex-tidy - rev: v1.13.0 - hooks: - - id: bibtex-tidy - args: - - --curly - - --numeric - - --align=13 - - --blank-lines - # disable sort: the order of keys and fields has explict meanings - #- --sort=key - - --duplicates=key,doi,citation,abstract - - --merge=combine - #- --sort-fields - #- --strip-comments - - --trailing-commas - - --encode-urls - - --remove-empty-fields - - --wrap=80 + # - repo: https://github.com/njzjz/mirrors-bibtex-tidy + # rev: v1.13.0 + # hooks: + # - id: bibtex-tidy + # args: + # - --curly + # - --numeric + # 
- --align=13 + # - --blank-lines + # # disable sort: the order of keys and fields has explict meanings + # #- --sort=key + # - --duplicates=key,doi,citation,abstract + # - --merge=combine + # #- --sort-fields + # #- --strip-comments + # - --trailing-commas + # - --encode-urls + # - --remove-empty-fields + # - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 180d6f0a3f..0af6d082b8 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -9,6 +9,7 @@ import numpy as np import paddle +import paddle.nn as nn from deepmd.dpmodel.utils.seed import ( child_seed, @@ -87,13 +88,14 @@ def __init__( type_map: Optional[list[str]] = None, # not implemented spin=None, - ): + ) -> None: del ntypes if spin is not None: raise NotImplementedError("old implementation of spin is not supported.") super().__init__() self.type_map = type_map self.compress = False + self.prec = PRECISION_DICT[precision] self.sea = DescrptBlockSeA( rcut, rcut_smth, @@ -161,7 +163,7 @@ def get_env_protection(self) -> float: """Returns the protection of building environment matrix.""" return self.sea.get_env_protection() - def share_params(self, base_class, shared_level, resume=False): + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. 
If not start from checkpoint (resume is False), @@ -222,10 +224,35 @@ def compute_input_stats( def reinit_exclude( self, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: """Update the type exclusions.""" self.sea.reinit_exclude(exclude_types) + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + raise ValueError("Enable compression is not supported.") + def forward( self, coord_ext: paddle.Tensor, @@ -266,7 +293,18 @@ def forward( The smooth switch function. """ - return self.sea.forward(nlist, coord_ext, atype_ext, None, mapping) + # cast the input to internal precsion + coord_ext = coord_ext.to(dtype=self.prec) + g1, rot_mat, g2, h2, sw = self.sea.forward( + nlist, coord_ext, atype_ext, None, mapping + ) + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + None, + None, + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) def set_stat_mean_and_stddev( self, @@ -367,10 +405,6 @@ def update_sel( class DescrptBlockSeA(DescriptorBlock): ndescrpt: Final[int] __constants__: ClassVar[list] = ["ndescrpt"] - lower: dict[str, int] - upper: dict[str, int] - table_data: dict[str, paddle.Tensor] - table_config: list[Union[int, float]] def __init__( self, @@ -389,7 +423,7 @@ def __init__( trainable: bool = True, seed: Optional[Union[int, list[int]]] = None, **kwargs, - ): + ) -> None: """Construct an embedding net of type `se_a`. 
Args: @@ -430,13 +464,6 @@ def __init__( self.register_buffer("mean", mean) self.register_buffer("stddev", stddev) - # add for compression - self.compress = False - self.lower = {} - self.upper = {} - self.table_data = {} - self.table_config = [] - ndim = 1 if self.type_one_side else 2 filter_layers = NetworkCollection( ndim=ndim, ntypes=len(sel), network_type="embedding_network" @@ -459,6 +486,21 @@ def __init__( for param in self.parameters(): param.stop_gradient = not trainable + # add for compression + self.compress = False + self.compress_info = nn.ParameterList( + [ + self.create_parameter([], dtype=self.prec).to(device="cpu") + for _ in range(len(self.filter_layers.networks)) + ] + ) + self.compress_data = nn.ParameterList( + [ + self.create_parameter([], dtype=self.prec).to(device=env.DEVICE) + for _ in range(len(self.filter_layers.networks)) + ] + ) + def get_rcut(self) -> float: """Returns the cut-off radius.""" return self.rcut @@ -517,11 +559,11 @@ def dim_out(self): return self.filter_neuron[-1] * self.axis_neuron @property - def dim_in(self): + def dim_in(self) -> int: """Returns the atomic input dimension of this descriptor.""" return 0 - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if key in ("avg", "data_avg", "davg"): self.mean = value elif key in ("std", "data_std", "dstd"): @@ -541,7 +583,7 @@ def compute_input_stats( self, merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, - ): + ) -> None: """ Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. 
@@ -587,22 +629,45 @@ def get_stats(self) -> dict[str, StatItem]: def reinit_exclude( self, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) def enable_compression( self, - table_data, - table_config, - lower, - upper, + table_data: dict[str, paddle.Tensor], + table_config: list[Union[int, float]], + lower: dict[str, int], + upper: dict[str, int], ) -> None: + for embedding_idx, ll in enumerate(self.filter_layers.networks): + if self.type_one_side: + ii = embedding_idx + ti = -1 + else: + # ti: center atom type, ii: neighbor type... + ii = embedding_idx // self.ntypes + ti = embedding_idx % self.ntypes + if self.type_one_side: + net = "filter_-1_net_" + str(ii) + else: + net = "filter_" + str(ti) + "_net_" + str(ii) + info_ii = paddle.to_tensor( + [ + lower[net], + upper[net], + upper[net] * table_config[0], + table_config[1], + table_config[2], + table_config[3], + ], + dtype=self.prec, + place="cpu", + ) + tensor_data_ii = table_data[net].to(device=env.DEVICE, dtype=self.prec) + self.compress_data[embedding_idx] = tensor_data_ii + self.compress_info[embedding_idx] = info_ii self.compress = True - self.table_data = table_data - self.table_config = table_config - self.lower = lower - self.upper = upper def forward( self, @@ -611,6 +676,7 @@ def forward( extended_atype: paddle.Tensor, extended_atype_embd: Optional[paddle.Tensor] = None, mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, ): """Calculate decoded embedding for each atom. 
@@ -627,7 +693,7 @@ def forward( del extended_atype_embd, mapping nf = nlist.shape[0] nloc = nlist.shape[1] - atype: paddle.Tensor = extended_atype[:, :nloc] + atype = extended_atype[:, :nloc] dmatrix, diff, sw = prod_env_mat( extended_coord, nlist, @@ -640,7 +706,6 @@ def forward( ) dmatrix = dmatrix.reshape([-1, self.nnei, 4]) - dmatrix = dmatrix.astype(self.prec) nfnl = dmatrix.shape[0] # pre-allocate a shape to pass jit xyz_scatter = paddle.zeros( @@ -649,7 +714,9 @@ def forward( ).to(extended_coord.place) # nfnl x nnei exclude_mask = self.emask(nlist, extended_atype).reshape([nfnl, self.nnei]) - for embedding_idx, ll in enumerate(self.filter_layers.networks): + for embedding_idx, (ll, compress_data_ii, compress_info_ii) in enumerate( + zip(self.filter_layers.networks, self.compress_data, self.compress_info) + ): if self.type_one_side: ii = embedding_idx ti = -1 @@ -680,10 +747,16 @@ def forward( if rr.numel() > 0: rr = rr * mm.unsqueeze(2).astype(rr.dtype) ss = rr[:, :, :1] - # nfnl x nt x ng - gg = ll.forward(ss) - # nfnl x 4 x ng - gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) + if self.compress: + raise NotImplementedError( + "Compressed environment is not implemented yet." 
+ ) + else: + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) + if ti_mask is not None: xyz_scatter[ti_mask] += gr else: @@ -699,8 +772,8 @@ def forward( result = result.reshape([nf, nloc, self.filter_neuron[-1] * self.axis_neuron]) rot_mat = rot_mat.reshape([nf, nloc] + list(rot_mat.shape[1:])) # noqa:RUF005 return ( - result.astype(env.GLOBAL_PD_FLOAT_PRECISION), - rot_mat.astype(env.GLOBAL_PD_FLOAT_PRECISION), + result, + rot_mat, None, None, sw, diff --git a/deepmd/pd/model/descriptor/se_atten.py b/deepmd/pd/model/descriptor/se_atten.py index 6829e4ea8a..1ebf8c6717 100644 --- a/deepmd/pd/model/descriptor/se_atten.py +++ b/deepmd/pd/model/descriptor/se_atten.py @@ -81,7 +81,7 @@ def __init__( ln_eps: Optional[float] = 1e-5, seed: Optional[Union[int, list[int]]] = None, type: Optional[str] = None, - ): + ) -> None: r"""Construct an embedding net of type `se_atten`. Parameters @@ -249,12 +249,20 @@ def __init__( # add for compression self.compress = False self.is_sorted = False - # self.compress_info = nn.ParameterList( - # [self.create_parameter([0], dtype=self.prec).to("cpu")] - # ) - # self.compress_data = nn.ParameterList( - # [self.create_parameter([0], dtype=self.prec).to(env.DEVICE)] - # ) + self.compress_info = nn.ParameterList( + [ + self.create_parameter( + [], default_initializer=nn.initializer.Constant(0), dtype=self.prec + ).to("cpu") + ] + ) + self.compress_data = nn.ParameterList( + [ + self.create_parameter( + [], default_initializer=nn.initializer.Constant(0), dtype=self.prec + ).to(env.DEVICE) + ] + ) def get_rcut(self) -> float: """Returns the cut-off radius.""" @@ -401,9 +409,21 @@ def enable_compression( lower, upper, ) -> None: - raise NotImplementedError( - "Compressed descriptor in paddle is not supported yet." 
+ net = "filter_net" + self.compress_info[0] = paddle.to_tensor( + [ + lower[net], + upper[net], + upper[net] * table_config[0], + table_config[1], + table_config[2], + table_config[3], + ], + dtype=self.prec, + place="cpu", ) + self.compress_data[0] = table_data[net].to(device=env.DEVICE, dtype=self.prec) + self.compress = True def forward( self, diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index ce930a51d6..8a9ee225e0 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -69,7 +69,7 @@ def __init__( # underscore to prevent conflict with normal inputs atomic_model_: Optional[T_AtomicModel] = None, **kwargs, - ): + ) -> None: super().__init__(*args, **kwargs) if atomic_model_ is not None: self.atomic_model: T_AtomicModel = atomic_model_ @@ -125,6 +125,7 @@ def enable_compression( check_frequency, ) + # cannot use the name forward. torch script does not work def forward_common( self, coord, @@ -173,7 +174,9 @@ def forward_common( atype, self.get_rcut(), self.get_sel(), - mixed_types=self.mixed_types(), + # types will be distinguished in the lower interface, + # so it doesn't need to be distinguished here + mixed_types=True, box=bb, ) model_predict_lower = self.forward_common_lower( @@ -408,7 +411,7 @@ def format_nlist( Returns ------- - formatted_nlist + formated_nlist the formatted nlist. 
""" diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 896f32e9d2..b22d110c00 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -82,7 +82,7 @@ def enable_prim(enable: bool = True): EAGER_COMP_OP_BLACK_LIST = [ "abs_grad", "cast_grad", - "concat_grad", + # "concat_grad", "cos_double_grad", "cos_grad", "cumprod_grad", @@ -115,8 +115,7 @@ def enable_prim(enable: bool = True): "sin_double_grad", "sin_grad", "slice_grad", - "split_grad", - "split_grad", + # "split_grad", "sqrt_grad", "stack_grad", "sum_grad", @@ -137,6 +136,7 @@ def enable_prim(enable: bool = True): "subtract_grad", "tile_grad", ] + EAGER_COMP_OP_BLACK_LIST = list(set(EAGER_COMP_OP_BLACK_LIST)) """Enable running program in primitive C++ API in eager/static mode.""" from paddle.framework import ( diff --git a/source/tests/pd/model/models/dpa1.json b/source/tests/pd/model/models/dpa1.json new file mode 100644 index 0000000000..a969c290ae --- /dev/null +++ b/source/tests/pd/model/models/dpa1.json @@ -0,0 +1,36 @@ +{ + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut_smth": 2.0, + "rcut": 6.0, + "neuron": [ + 2, + 4, + 8 + ], + "axis_neuron": 4, + "attn": 5, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": true, + "temperature": 1.0, + "seed": 1 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1 + } +} diff --git a/source/tests/pd/model/models/dpa1.pd b/source/tests/pd/model/models/dpa1.pd new file mode 100644 index 0000000000000000000000000000000000000000..147312635cda5afb1065ddcb61b66e90bebe5f3f GIT binary patch literal 11329 zcmeHNc{r5&+m~#mMbU;R**X;}OG({YEhDX3juVl|D2$j2Au6OKYOEi3Z)ahbMl_w^}fF|9cTHy=X(F}{PE0OpX+(%x9VqrEk5H)B zslNQ*E~NSj+FFcVa-B-x@uY5^WKQ*_?xrddCJ}fEQUpG4Dv95h--6;~NBreB-lJ=% zJ{QNXnCv@Ai)v_SsQ;C7d^-_%C{%NwP40qYaZQ{jGBA;Wi406+U?Kw(8Te;2K;=wg 
zUjYY-g$3~xmBdF9{AIe6ru_SCpY=bdbsvnyIPp1=hhH)<(FYTKFwqAS>tI3${;zf5 z+qBlUurpg}YDS?DZ7Eh{Tcv+}GqpBzBHBABsr);nWMyk=XHPz+WMf9Lw{qk%=Y?O~DNIi*=q;sqUQ z(wwikW#5HCyXOz-Q&N%t4r*^MW8N`i58FdHd!fv-I`{wLqBblLQxywHp z#e#%>3c1Zdc5|cftz)@J+OqN%`!y3n4g?E??TLXW3$%K5Q<5R>-G!ug7g(^_cFJ}& z{cN~#cQns2yaqmMCOj705dgaqkM^navS95Z;*+rBHQ+a>yD&^47gCIH!*>B;-f|6`9#QMZ2Qqdr5z~UT`Qr@k$E)@feRGb&Qss>2k zykTo>KBQhG#~#siLwuhSjaE-^yX)+_xNs&kX~sEyG)scp!>UBt=6Wbddq0b&k&LZm zLz01CBHZ<8%Sf@xhA)J=p(GtTK6?93tvQkglSY5kJtM(HSGtLWoNEqv1{HbV`I-UA zorwYIvu;74;n7P@XR46LRRzpfGM*gP||;p+Y> zkn7lVyGuL^6V9jUU7<5z!!@0ID$dmyozu7XidzoUiLRbuW>kiOibpRb6|iB-2}X$J zx**65>z1aK6d^NsqfbAp9*xu5Gu)90eI6{SW{rI4wOqlk@+kv7buc&k8B?U9sN zM-Idf4cl(zPk{E^c{Uq*@-gVz>#ZV?3s2Y@(yyQbdMnQiK3J2A-&A75UsnlpD>DZ! z8ZndeU@*&PvtyI~Km_GEKaC}xkd|0d1l#B=k7G(*XmDB2p(6DS1bHoH?T$!=F50Sr zD1}RqU{}4VCHf1bL?_)K1vg-al=NU?Tq7F4x_S1(54E`eibHq!VFnb{7~8H*A4RFi zxkFQpyP!5OkpHGxEkqq&Vs*Uo3w)!s@z2vDpQMbVe*6TXG4<1)#KThbR=L;;{`52YduV)h5+1Q_0cEDIP1xozAJL_B1;LY?NgN~c!pimn*H*qu% zLO-5PP?}eYVl@A@AB?Mj&*^lu(b}gV=tkfNVo0wYxyIdEe!=k%+7B0V{_baQ{sih|As+j*fO5D--;UGJU80gvQ!WySdLx zYm;OFb^yuJjP(-x*B6b5SJorZ-%VC7r~|eexI4`5=)zXDKAP^#55V_cHa{ya78Ybw zYY=BQj;)S0e<(iNv59JGOSZT9o!R-{f&Ja#|E<5(|9oKMo@3?T^c)@^ z(ADN^I=DQK2p-NV#-O+mcY);;D4jz&AiVe%q)ywDvH1Z5FJ&7Sd!&(}-I142oluX5 zPUuXP>nX=NF-cF!f?A+h2Q&{-uc3ba$r%Fb$qYZwELTk&tO+Z@V?FJ$5-%QNhsGlB z_kyeO0;TA(%(Z+NnJ4Mg{4g5^DAn`4K53#@LA;RNjCv3`XM8Q%CkAz|ZzpUwPr>E& zIadqc6yf1R>}+9KI`R`wY|wbsgXbdSAj~HXB<|aWnQr6cCx33@4H*_nZCku2tu+_v z-7l5O2Fk$CL-K<UPIyO&usj$@eLJE^>5;*qnQMUb;KVTz#?Y_4X&QjhMz8WfOrWtAu9WO}T~D zhhCJ)DAd5r&%1@sEiH%eAX`IUNd}Y#zjS*Xk_78?=x+wHGLXuW}UpTy)!HE{l}PDBEDuYzc*qkIThcp^*ru;pbY${?`bmI#KzWyGqfAE}5cM3|Umc|1I+r5s$W8rGJ5sl<8P4EFlXXhjMBw8+@A_t1h$ z-Y&a(5Gtm~cgchc(B?c$9MKSbg;r$gHJ>8;!DIM&zz&O#xMJ4(Q%t$HxK5Dj&%7Q0 z%RoM}O5^3&uo%~K{Fc_A^c?Y?;TK-NG*B|P^LRI@xqgU-=K?0SJRjEL{6 z3Y<|1Goqhl#azn5>np^(Q$DjmNwn`rg$>nEJn0-sIwu+Vb#LlyrPpF%M@EY5;e326 zL>XRDRtd-A?}ys+6(UW=i|r*94symN^*d%ig<{b|S=}Xc*dMd?qP|WgG+aycovKp` 
zWcw3A6;YY^aAtzheCPnSlgqXZfiXDaQoot8*=+h0{g)1J$1%}-5wq+HigJnb2G4cssny7-tmOAVmTOH$ij&Je(fAk!`4Q!lz z{dKo7cIP+Gp?K8FnnJYybyvfdNI6ZmKmN=1#=rJCzPq0>i@-bP=tVe=zHij8*!XDz zoSY_nlz)9PTxU1<|NKH9@}?i6T;;36`yU4P_3V2FGK)@&OwX!C)v9SDv@ZoC zHO1p%aXo?ZhBCM-`K~m6paLD#UG|1%24kpe=d6(F0TGV)7CBRc}&BB%_bbFdIzl?bw zY~1-KtG3hObb#Q4M#ZPtZ)sq1JCy;0TeIr3-9qr}&(=QY^jY8@YcjlBgFUudqc8L%oGzr{qiLt)w-EaNAj6eP z-(ko%#&FRyYtKm*$?vdDQT{=3SsPfWMqZD-Ukcf4=C?b!^#4J|tG>^e@7D)U9!{HX zk+$7B)R_shl@7S;wp7Am;j%_klT?TzxhKxr*a8B}R3B(9po5f`>tn*sWOz*%DA@X> z8W+Zlmf55?;cEqP0mt=~5PT&_HFQlO=ziQTe`s+VNVvbahT_fObwE$C*ZmF_^%D}0 zd6vNbq(!~@N9*Bslo(VUOMx3-dcp<6+kkd6Z)o`s`Ka!!Fw(fX5%0I{ZWtt&pacIv z!PI@PpvBR4vTuGPbg&y`E2Q%9qW>vJJE4p4>}o_$C(k|HuytwGV%ca=-j~-tO(+)7 z(r`gZPd4tK<8E|5sR3sUQI;=yLnds8c`>OzfzGMjIhnI{>Mzx?d zXP3*G@q8?drT=?%m`;e#e^h)F{y=)avo?QMea0`_KPNxqbNtWf&-k1t1n5r)(D-Hh zXB0?c&$o9lPudeL_L6s+*_+uAIcCU^^P#Ox<-EyBn&dm%l4wh`H=__OOgaArIgss5 zRj8!7T;P>d(n2oqDk@2q3%r_2TE+!drIJ>0fz_y_wOn9zDoL9QtU)E|aDmrQNrqhD zwN#Rkujp@BuH?}Ci5t%0d4wCzA$p7(&S83j8_uEX$PMRkJ_28`dywnoCoW#o1qGCM*z1+l-qRfgZJ)(J~ zxdoMa$@xX8dby=JC7FpuMTwPDdU)cCQj_!Zic5-0lS`&dp3=h#Ryd`HJud~KWbzbm zhSn+0j7d}4rvy#W@MiR8^5$rplEK=;no?3(kP6bolx8udvjZZ}+{0)y#m~>r>pu{H z32%myDM_8q4V^KPhXiXs*l#_y>n3mDqWylN+Lvz4|7d^Naj}Sn^R@jtih*0dEIzdV zUEH=y^#Skqcjbpqb2+}j{<7MAdx5y)``^^^{#d>K`2M=wl`}7gy|aJyz{>d0&Nuee z$teyJybtzoiFIG}koCv@S@WmKOn>oU|LsFl{Uv8_us3r?Uej^vNf|u8Hz@J=gfKl literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/test_atomic_model_atomic_stat.py b/source/tests/pd/model/test_atomic_model_atomic_stat.py new file mode 100644 index 0000000000..93aa7b8905 --- /dev/null +++ b/source/tests/pd/model/test_atomic_model_atomic_stat.py @@ -0,0 +1,431 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import tempfile +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import h5py +import numpy as np +import paddle + +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, +) +from 
deepmd.pd.model.atomic_model import ( + BaseAtomicModel, + DPAtomicModel, +) +from deepmd.pd.model.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.path import ( + DPPath, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class FooFitting(paddle.nn.Layer, BaseFitting): + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "foo", + [1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "bar", + [1, 2], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + ] + ) + + def serialize(self) -> dict: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> list[str]: + raise NotImplementedError + + def forward( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + nf, nloc, _ = descriptor.shape + ret = {} + ret["foo"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ) + .reshape([nf, nloc, *self.output_def()["foo"].shape]) + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["bar"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ) + .reshape([nf, nloc, *self.output_def()["bar"].shape]) + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + return ret + + +class TestAtomicModelStat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): 
+ TestCaseSingleFrameWithNlist.setUp(self) + self.merged_output_stat = [ + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5, 6 + "atom_foo": to_paddle_tensor( + np.array([[5.0, 5.0, 5.0], [5.0, 6.0, 7.0]]).reshape(2, 3, 1) + ), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_atom_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5, 6 from atomic label. 
+ "foo": to_paddle_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["foo"].shape]) + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["bar"].shape]) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + expected_std = np.ones( + (2, 2, 2), dtype=np.float64 + ) # 2 keys, 2 atypes, 2 max dims. 
+ expected_std[0, :, :1] = np.array([0.0, 0.816496]).reshape( + 2, 1 + ) # updating std for foo based on [5.0, 5.0, 5.0], [5.0, 6.0, 7.0]] + np.testing.assert_almost_equal( + to_numpy_array(md0.out_std), expected_std, decimal=4 + ) + ret1 = cvt_ret(ret1) + # nt x odim + foo_bias = np.array([5.0, 6.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + np.testing.assert_almost_equal( + to_numpy_array(md0.out_std), expected_std, decimal=4 + ) + + # 4. test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + expected_std[0, :, :1] = np.array([1.24722, 0.47140]).reshape( + 2, 1 + ) # updating std for foo based on [4.0, 3.0, 2.0], [1.0, 1.0, 1.0]] + expected_ret3 = {} + # new bias [2.666, 1.333] + expected_ret3["foo"] = np.array( + [[3.6667, 4.6667, 4.3333], [6.6667, 6.3333, 7.3333]] + ).reshape(2, 3, 1) + for kk in ["foo"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk], decimal=4) + np.testing.assert_almost_equal( + to_numpy_array(md0.out_std), expected_std, decimal=4 + ) + + +class TestAtomicModelStatMergeGlobalAtomic( + unittest.TestCase, TestCaseSingleFrameWithNlist +): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): + 
TestCaseSingleFrameWithNlist.setUp(self) + self.merged_output_stat = [ + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 0], [0, 0, 0]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5.5, nan + "atom_foo": to_paddle_tensor( + np.array([[5.0, 5.0, 5.0], [5.0, 6.0, 7.0]]).reshape(2, 3, 1) + ), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_atom_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5.5, 3 from atomic label. 
+ "foo": to_paddle_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["foo"].shape]) + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["bar"].shape]) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. 
test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + # nt x odim + foo_bias = np.array([5.5, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + + # 4. test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + expected_ret3 = {} + # new bias [2, -5] + expected_ret3["foo"] = np.array([[3, 4, -2], [6, 0, 1]]).reshape(2, 3, 1) + for kk in ["foo"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk], decimal=4) diff --git a/source/tests/pd/model/test_atomic_model_global_stat.py b/source/tests/pd/model/test_atomic_model_global_stat.py new file mode 100644 index 0000000000..abd7928a0f --- /dev/null +++ b/source/tests/pd/model/test_atomic_model_global_stat.py @@ -0,0 +1,510 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import tempfile +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import h5py +import numpy as np +import paddle + +from deepmd.dpmodel.atomic_model import DPAtomicModel as DPDPAtomicModel +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + 
OutputVariableDef, +) +from deepmd.pd.model.atomic_model import ( + BaseAtomicModel, + DPAtomicModel, +) +from deepmd.pd.model.descriptor import ( + DescrptDPA1, + DescrptSeA, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.pd.model.task.ener import ( + InvarFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.path import ( + DPPath, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class FooFitting(paddle.nn.Layer, BaseFitting): + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "foo", + [1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "pix", + [1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "bar", + [1, 2], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + ] + ) + + def serialize(self) -> dict: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> list[str]: + raise NotImplementedError + + def forward( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + nf, nloc, _ = descriptor.shape + ret = {} + ret["foo"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ) + .reshape([nf, nloc] + self.output_def()["foo"].shape) # noqa: RUF005 + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["pix"] = ( + paddle.to_tensor( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ) + .reshape([nf, nloc] + self.output_def()["pix"].shape) # noqa: RUF005 + 
.to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["bar"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ) + .reshape([nf, nloc] + self.output_def()["bar"].shape) # noqa: RUF005 + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + return ret + + +class TestAtomicModelStat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + nf, nloc, nnei = self.nlist.shape + self.merged_output_stat = [ + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 1, 3 + "foo": to_paddle_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # no bias of pix + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + } + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + expected_std = np.ones((3, 2, 2)) # 3 keys, 2 atypes, 2 max dims. + # nt x odim + foo_bias = np.array([1.0, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + np.testing.assert_almost_equal(to_numpy_array(md0.out_std), expected_std) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + np.testing.assert_almost_equal(to_numpy_array(md0.out_std), expected_std) + + # 4. 
test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + ## model output on foo: [[2, 3, 6], [5, 8, 9]] given bias [1, 3] + ## foo sumed: [11, 22] compared with [5, 7], fit target is [-6, -15] + ## fit bias is [1, -8] + ## old bias + fit bias [2, -5] + ## new model output is [[3, 4, -2], [6, 0, 1]], which sumed to [5, 7] + expected_ret3 = {} + expected_ret3["foo"] = np.array([[3, 4, -2], [6, 0, 1]]).reshape(2, 3, 1) + expected_ret3["pix"] = ret0["pix"] + for kk in ["foo", "pix"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk]) + # bar is too complicated to be manually computed. + np.testing.assert_almost_equal(to_numpy_array(md0.out_std), expected_std) + + def test_preset_bias(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + preset_out_bias = { + # "foo": np.array(3.0, 2.0]).reshape(2, 1), + "foo": [None, 2], + "bar": np.array([7.0, 5.0, 13.0, 11.0]).reshape(2, 1, 2), + } + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + preset_out_bias=preset_out_bias, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + # foo sums: [5, 7], + # given bias of type 1 being 2, the bias left for type 0 is [5-2*1, 7-2*2] = [3,3] + # the solution of type 0 is 1.8 + foo_bias = np.array([1.8, preset_out_bias["foo"][1]]).reshape(2, 1) + bar_bias = preset_out_bias["bar"] + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + + # 4. 
test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + ## model output on foo: [[2.8, 3.8, 5], [5.8, 7., 8.]] given bias [1.8, 2] + ## foo sumed: [11.6, 20.8] compared with [5, 7], fit target is [-6.6, -13.8] + ## fit bias is [-7, 2] (2 is assigned. -7 is fit to [-8.6, -17.8]) + ## old bias[1.8,2] + fit bias[-7, 2] = [-5.2, 4] + ## new model output is [[-4.2, -3.2, 7], [-1.2, 9, 10]] + expected_ret3 = {} + expected_ret3["foo"] = np.array([[-4.2, -3.2, 7.0], [-1.2, 9.0, 10.0]]).reshape( + 2, 3, 1 + ) + expected_ret3["pix"] = ret0["pix"] + for kk in ["foo", "pix"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk]) + # bar is too complicated to be manually computed. + + def test_preset_bias_all_none(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + preset_out_bias = { + "foo": [None, None], + } + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + preset_out_bias=preset_out_bias, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + # nt x odim + foo_bias = np.array([1.0, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + def test_serialize(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "foo", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["A", "B"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret0 
= md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + md1 = DPAtomicModel.deserialize(md0.serialize()) + ret1 = md1.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + + for kk in ["foo"]: + np.testing.assert_almost_equal(ret0[kk], ret1[kk]) + + md2 = DPDPAtomicModel.deserialize(md0.serialize()) + args = [self.coord_ext, self.atype_ext, self.nlist] + ret2 = md2.forward_common_atomic(*args) + for kk in ["foo"]: + np.testing.assert_almost_equal(ret0[kk], ret2[kk]) diff --git a/source/tests/pd/model/test_descriptor_dpa1.py b/source/tests/pd/model/test_descriptor_dpa1.py new file mode 100644 index 0000000000..baf3117ffc --- /dev/null +++ b/source/tests/pd/model/test_descriptor_dpa1.py @@ -0,0 +1,388 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.pd.model.descriptor import ( + DescrptBlockSeAtten, + DescrptDPA1, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestDPA1(unittest.TestCase): + def setUp(self): + cell = [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + self.cell = ( + paddle.to_tensor( + cell, + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ) + .to(device=env.DEVICE) + .reshape([1, 3, 3]) + ) + coord = [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 
6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + self.coord = ( + paddle.to_tensor(coord, dtype=env.GLOBAL_PD_FLOAT_PRECISION) + .reshape([1, -1, 3]) + .to(device=env.DEVICE) + ) + self.atype = ( + paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32) + .reshape([1, -1]) + .to(device=env.DEVICE) + ) + self.ref_d = paddle.to_tensor( + [ + 8.382518544113587780e-03, + -3.390120566088597812e-03, + 6.145981571114964362e-03, + -4.880300873973819273e-03, + -3.390120566088597812e-03, + 1.372540996564941464e-03, + -2.484163690574096341e-03, + 1.972313058658722688e-03, + 6.145981571114964362e-03, + -2.484163690574096341e-03, + 4.507748738021747671e-03, + -3.579717194906019764e-03, + -4.880300873973819273e-03, + 1.972313058658722688e-03, + -3.579717194906019764e-03, + 2.842794615687799838e-03, + 6.733043802494966066e-04, + -2.721540313345096771e-04, + 4.936158526085561134e-04, + -3.919743287822345223e-04, + -1.311123004527576900e-02, + 5.301179352601203924e-03, + -9.614612349318877454e-03, + 7.634884975521277241e-03, + 8.877088452901006621e-03, + -3.590945566653638409e-03, + 6.508042782015627942e-03, + -5.167671664327699171e-03, + -2.697241463040870365e-03, + 1.091350446825975137e-03, + -1.976895708961905022e-03, + 1.569671412121975348e-03, + 8.645131636261189911e-03, + -3.557395265621639355e-03, + 6.298048561552698106e-03, + -4.999272007935521948e-03, + -3.557395265621639355e-03, + 1.467866637220284964e-03, + -2.587004431651147504e-03, + 2.052752235601402672e-03, + 6.298048561552698106e-03, + -2.587004431651147504e-03, + 4.594085551315935101e-03, + -3.647656549789176847e-03, + -4.999272007935521948e-03, + 2.052752235601402672e-03, + -3.647656549789176847e-03, + 2.896359275520481256e-03, + 6.689620176492027878e-04, + -2.753606422414641049e-04, + 4.864958810186969444e-04, + -3.860599754167503119e-04, + -1.349238259226558101e-02, + 5.547478630961994242e-03, + -9.835472300819447095e-03, + 7.808197926069362048e-03, + 9.220744348752592245e-03, + 
-3.795799103392961601e-03, + 6.716516319358462918e-03, + -5.331265718473574867e-03, + -2.783836698392940304e-03, + 1.147461939123531121e-03, + -2.025013030986024063e-03, + 1.606944814423778541e-03, + 9.280385723343491378e-03, + -3.515852178447095942e-03, + 7.085282215778941628e-03, + -5.675852414643783178e-03, + -3.515852178447095942e-03, + 1.337760635271160884e-03, + -2.679428786337713451e-03, + 2.145400621815936413e-03, + 7.085282215778941628e-03, + -2.679428786337713451e-03, + 5.414439648102228192e-03, + -4.338426468139268931e-03, + -5.675852414643783178e-03, + 2.145400621815936413e-03, + -4.338426468139268931e-03, + 3.476467482674507146e-03, + 7.166961981167455130e-04, + -2.697932188839837972e-04, + 5.474643906631899504e-04, + -4.386556623669893621e-04, + -1.480434821331240956e-02, + 5.604647062899507579e-03, + -1.130745349141585449e-02, + 9.059113563516829268e-03, + 9.758791063112262978e-03, + -3.701477720487638626e-03, + 7.448215522796466058e-03, + -5.966057584545172120e-03, + -2.845102393948158344e-03, + 1.078743584169829543e-03, + -2.170093031447992756e-03, + 1.738010461687942770e-03, + 9.867599071916231118e-03, + -3.811041717688905522e-03, + 7.121877634386481262e-03, + -5.703120290113914553e-03, + -3.811041717688905522e-03, + 1.474046183772771213e-03, + -2.747386907428428938e-03, + 2.199711055637492037e-03, + 7.121877634386481262e-03, + -2.747386907428428938e-03, + 5.145050639440944609e-03, + -4.120642824501622239e-03, + -5.703120290113914553e-03, + 2.199711055637492037e-03, + -4.120642824501622239e-03, + 3.300262321758350853e-03, + 1.370499995344566383e-03, + -5.313041843655797901e-04, + 9.860110343046961986e-04, + -7.892505817954784597e-04, + -1.507686316307561489e-02, + 5.818961290579217904e-03, + -1.088774506142304276e-02, + 8.719460408506790952e-03, + 9.764630842803939323e-03, + -3.770134041110058572e-03, + 7.049438389985595785e-03, + -5.645302934019884485e-03, + -3.533582373572779437e-03, + 1.367148320603491559e-03, + -2.546602904764623705e-03, + 
2.038882844528267305e-03, + 7.448297038731285964e-03, + -2.924276815200288742e-03, + 5.355960540523636154e-03, + -4.280386435083473329e-03, + -2.924276815200288742e-03, + 1.150311064893848757e-03, + -2.100635980860638373e-03, + 1.678427895009850001e-03, + 5.355960540523636154e-03, + -2.100635980860638373e-03, + 3.853607053247790071e-03, + -3.080076301871465493e-03, + -4.280386435083473329e-03, + 1.678427895009850001e-03, + -3.080076301871465493e-03, + 2.461876613756722523e-03, + 9.730712866459405395e-04, + -3.821759579990726546e-04, + 6.994242056622360787e-04, + -5.589662297882965055e-04, + -1.138916742131982317e-02, + 4.469391132927387489e-03, + -8.192016282448397885e-03, + 6.547234460517113892e-03, + 7.460070829043288082e-03, + -2.929867802018087421e-03, + 5.363646855497249989e-03, + -4.286347242903034739e-03, + -2.643569023340565718e-03, + 1.038826463247002245e-03, + -1.899910089750410976e-03, + 1.518237240362583541e-03, + ], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ).to(device=env.DEVICE) + with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: + self.model_json = json.load(fp) + self.file_model_param = Path(CUR_DIR) / "models" / "dpa1.pd" + self.file_type_embed = Path(CUR_DIR) / "models" / "dpa2_tebd.pd" + + def test_descriptor_block(self) -> None: + # paddle.seed(0) + model_dpa1 = self.model_json + dparams = model_dpa1["descriptor"] + ntypes = len(model_dpa1["type_map"]) + assert "se_atten" == dparams.pop("type") + dparams["ntypes"] = ntypes + des = DescrptBlockSeAtten( + **dparams, + ).to(env.DEVICE) + state_dict = paddle.load(str(self.file_model_param)) + # this is an old state dict, modify manually + state_dict["compress_info.0"] = des.compress_info[0] + state_dict["compress_data.0"] = des.compress_data[0] + des.set_state_dict(state_dict) + coord = self.coord + atype = self.atype + box = self.cell + # handle type_embedding + type_embedding = TypeEmbedNet(ntypes, 8, use_tebd_bias=True).to(env.DEVICE) + 
type_embedding.set_state_dict(paddle.load(str(self.file_type_embed))) + + ## to save model parameters + # paddle.save(des.state_dict(), 'model_weights.pd') + # paddle.save(type_embedding.state_dict(), 'model_weights.pd') + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + des.get_rcut(), + des.get_sel(), + mixed_types=des.mixed_types(), + box=box, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + nlist, + extended_coord, + extended_atype, + type_embedding(extended_atype), + mapping=None, + ) + # np.savetxt('tmp.out', descriptor.detach().numpy().reshape(1,-1), delimiter=",") + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntypes()) + np.testing.assert_allclose( + descriptor.reshape([-1]).numpy(), self.ref_d.numpy(), atol=1e-10, rtol=1e-10 + ) + + def test_descriptor(self) -> None: + with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: + self.model_json = json.load(fp) + model_dpa2 = self.model_json + ntypes = len(model_dpa2["type_map"]) + dparams = model_dpa2["descriptor"] + dparams["ntypes"] = ntypes + assert dparams.pop("type") == "se_atten" + dparams["concat_output_tebd"] = False + dparams["use_tebd_bias"] = True + des = DescrptDPA1( + **dparams, + ).to(env.DEVICE) + target_dict = des.state_dict() + source_dict = paddle.load(str(self.file_model_param)) + type_embd_dict = paddle.load(str(self.file_type_embed)) + target_dict = translate_se_atten_and_type_embd_dicts_to_dpa1( + target_dict, + source_dict, + type_embd_dict, + ) + des.set_state_dict(target_dict) + + coord = self.coord + atype = self.atype + box = self.cell + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + des.get_rcut(), + des.get_sel(), + mixed_types=des.mixed_types(), + box=box, + ) + descriptor, env_mat, diff, rot_mat, sw = 
des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntypes()) + np.testing.assert_allclose( + descriptor.reshape([-1]).numpy(), self.ref_d.numpy(), atol=1e-10, rtol=1e-10 + ) + + dparams["concat_output_tebd"] = True + des = DescrptDPA1( + **dparams, + ).to(env.DEVICE) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + + +def translate_se_atten_and_type_embd_dicts_to_dpa1( + target_dict, + source_dict, + type_embd_dict, +): + all_keys = list(target_dict.keys()) + record = [False for ii in all_keys] + for kk, vv in source_dict.items(): + tk = "se_atten." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = vv + assert len(type_embd_dict.keys()) == 2 + it = iter(type_embd_dict.keys()) + for _ in range(2): + kk = next(it) + tk = "type_embedding." 
+ kk + record[all_keys.index(tk)] = True + target_dict[tk] = type_embd_dict[kk] + record[all_keys.index("se_atten.compress_data.0")] = True + record[all_keys.index("se_atten.compress_info.0")] = True + assert all(record) + return target_dict diff --git a/source/tests/pd/model/test_dpa1.py b/source/tests/pd/model/test_dpa1.py new file mode 100644 index 0000000000..285dd3d4cd --- /dev/null +++ b/source/tests/pd/model/test_dpa1.py @@ -0,0 +1,164 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor.dpa1 import DescrptDPA1 as DPDescrptDPA1 +from deepmd.pd.model.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDescrptSeAtten(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng(100) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, sm, to, tm, prec, ect in itertools.product( + [False, True], # resnet_dt + [False, True], # smooth_type_embedding + [False, True], # type_one_side + ["concat", "strip"], # tebd_input_mode + [ + "float64", + ], # precision + [False, True], # use_econf_tebd + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + + # dpa1 new impl + dd0 = DescrptDPA1( + self.rcut, + self.rcut_smth, + self.sel_mix, + self.nt, + attn_layer=2, + precision=prec, + resnet_dt=idt, + smooth_type_embedding=sm, + type_one_side=to, + tebd_input_mode=tm, + use_econf_tebd=ect, + type_map=["O", "H"] 
if ect else None, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + dd0.se_atten.mean = paddle.to_tensor(davg, dtype=dtype).to( + device=env.DEVICE + ) + dd0.se_atten.stddev = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + # serialization + dd1 = DescrptDPA1.deserialize(dd0.serialize()) + rd1, _, _, _, _ = dd1( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + # dp impl + dd2 = DPDescrptDPA1.deserialize(dd0.serialize()) + rd2, _, _, _, _ = dd2.call( + self.coord_ext, + self.atype_ext, + self.nlist, + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd2, + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + + def test_jit( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec, sm, to, tm, ect in itertools.product( + [ + False, + ], # resnet_dt + [ + "float64", + ], # precision + [False, True], # smooth_type_embedding + [ + False, + ], # type_one_side + ["concat", "strip"], # tebd_input_mode + [False, True], # use_econf_tebd + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # dpa1 new impl + dd0 = DescrptDPA1( + self.rcut, + self.rcut_smth, + self.sel, + self.nt, + precision=prec, + resnet_dt=idt, + smooth_type_embedding=sm, + type_one_side=to, + tebd_input_mode=tm, + 
use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ) + dd0.se_atten.mean = paddle.to_tensor(davg, dtype=dtype).to( + device=env.DEVICE + ) + dd0.se_atten.dstd = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + # dd1 = DescrptDPA1.deserialize(dd0.serialize()) + model = paddle.jit.to_static(dd0) + # model = paddle.jit.to_static(dd1) diff --git a/source/tests/pd/model/test_permutation_denoise.py b/source/tests/pd/model/test_permutation_denoise.py new file mode 100644 index 0000000000..a0de541f0b --- /dev/null +++ b/source/tests/pd/model/test_permutation_denoise.py @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + get_generator, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dpa1, + model_dpa2, + model_hybrid, +) + +dtype = paddle.float64 + +model_dpa1 = copy.deepcopy(model_dpa1) +model_dpa2 = copy.deepcopy(model_dpa2) +model_hybrid = copy.deepcopy(model_hybrid) +model_dpa1["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_dpa1.pop("fitting_net") +model_dpa2["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_dpa2.pop("fitting_net") +model_hybrid["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_hybrid.pop("fitting_net") + + +class PermutationDenoiseTest: + def test( + self, + ) -> None: + generator = get_generator(GLOBAL_SEED) + natoms = 5 + cell = paddle.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + idx_perm = [1, 0, 4, 3, 2] + updated_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), 
cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} + updated_c1, logits1 = eval_model( + self.model, + coord[idx_perm].unsqueeze(0), + cell.unsqueeze(0), + atype[idx_perm], + denoise=True, + ) + ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} + prec = 1e-10 + np.testing.assert_allclose( + ret0["updated_coord"][idx_perm].numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + np.testing.assert_allclose( + ret0["logits"][idx_perm].numpy(), + ret1["logits"].numpy(), + rtol=prec, + atol=prec, + ) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA1(unittest.TestCase, PermutationDenoiseTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA2(unittest.TestCase, PermutationDenoiseTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model( + model_params, + ).to(env.DEVICE) + + +# @unittest.skip("hybrid not supported at the moment") +# class TestDenoiseModelHybrid(unittest.TestCase, TestPermutationDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# self.type_split = True +# self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_rot_denoise.py b/source/tests/pd/model/test_rot_denoise.py new file mode 100644 index 0000000000..74d5d41791 --- /dev/null +++ b/source/tests/pd/model/test_rot_denoise.py @@ -0,0 +1,124 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed 
import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation_denoise import ( # model_dpa2, + model_dpa1, +) + +dtype = paddle.float64 + + +class RotDenoiseTest: + def test( + self, + ): + generator = paddle.seed(GLOBAL_SEED) + prec = 1e-10 + natoms = 5 + cell = 10.0 * paddle.eye(3, dtype=dtype).to(env.DEVICE) + coord = 2 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + shift = paddle.to_tensor([4, 4, 4], dtype=dtype).to(env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + from scipy.stats import ( + special_ortho_group, + ) + + rmat = paddle.to_tensor(special_ortho_group.rvs(3), dtype=dtype).to(env.DEVICE) + + # rotate only coord and shift to the center of cell + coord_rot = paddle.matmul(coord, rmat) + update_c0, logits0 = eval_model( + self.model, + (coord + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + denoise=True, + ) + update_c0 = update_c0 - (coord + shift).unsqueeze(0) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, + (coord_rot + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + denoise=True, + ) + update_c1 = update_c1 - (coord_rot + shift).unsqueeze(0) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + np.testing.assert_allclose( + paddle.matmul(ret0["updated_coord"], rmat).numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + np.testing.assert_allclose( + ret0["logits"].numpy(), ret1["logits"].numpy(), rtol=prec, atol=prec + ) + + # rotate coord and cell + paddle.seed(0) + cell = paddle.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + coord_rot = paddle.matmul(coord, rmat) + cell_rot = paddle.matmul(cell, rmat) + update_c0, logits0 = 
eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, + coord_rot.unsqueeze(0), + cell_rot.unsqueeze(0), + atype, + denoise=True, + ) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + np.testing.assert_allclose( + ret0["logits"].numpy(), ret1["logits"].numpy(), rtol=prec, atol=prec + ) + np.testing.assert_allclose( + paddle.matmul(ret0["updated_coord"], rmat).numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA1(unittest.TestCase, RotDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +# @unittest.skip("hybrid not supported at the moment") +# class TestEnergyModelHybrid(unittest.TestCase, TestRotDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# self.type_split = True +# self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_saveload_dpa1.py b/source/tests/pd/model/test_saveload_dpa1.py new file mode 100644 index 0000000000..54a82e479a --- /dev/null +++ b/source/tests/pd/model/test_saveload_dpa1.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle +from paddle.io import ( + DataLoader, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.dataloader import ( + BufferedIterator, + DpLoaderSet, +) +from deepmd.pd.utils.stat import 
( + make_stat_input, +) +from deepmd.tf.common import ( + expand_sys_str, +) + + +def get_dataset(config): + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + sel = model_config["descriptor"]["sel"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + batch_size = config["training"]["training_data"]["batch_size"] + type_map = model_config["type_map"] + + dataset = DpLoaderSet(systems, batch_size, type_map) + data_stat_nbatch = model_config.get("data_stat_nbatch", 10) + sampled = make_stat_input(dataset.systems, dataset.dataloaders, data_stat_nbatch) + return dataset, sampled + + +class TestSaveLoadDPA1(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as fin: + self.config = json.load(fin) + self.config["loss"]["starter_learning_rate"] = self.config["learning_rate"][ + "start_lr" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.dataset, self.sampled = get_dataset(self.config) + self.training_dataloader = DataLoader( + self.dataset, + batch_sampler=paddle.io.BatchSampler( + sampler=paddle.io.RandomSampler(self.dataset), + drop_last=False, + ), + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + collate_fn=lambda x: x[0], + ) + device = paddle.get_device() + paddle.set_device("cpu") + self.training_data = BufferedIterator(iter(self.training_dataloader)) + paddle.set_device(device) + self.loss = EnergyStdLoss(**self.config["loss"]) + self.cur_lr = 1 + self.task_key = "Default" + self.input_dict, self.label_dict = self.get_data() + self.start_lr = self.config["learning_rate"]["start_lr"] + + def get_model_result(self, read=False, model_file="tmp_model.pd"): + wrapper = self.create_wrapper(read) + 
optimizer = paddle.optimizer.Adam( + learning_rate=self.start_lr, parameters=wrapper.parameters() + ) + optimizer.clear_grad() + if read: + wrapper.set_state_dict(paddle.load(model_file)) + os.remove(model_file) + else: + paddle.save(wrapper.state_dict(), model_file) + result = wrapper( + **self.input_dict, + cur_lr=self.cur_lr, + label=self.label_dict, + task_key=self.task_key, + )[0] + return result + + def create_wrapper(self, read: bool): + model_config = copy.deepcopy(self.config["model"]) + model_config["resuming"] = read + model_config["stat_file_dir"] = "stat_files" + model_config["stat_file"] = "stat.hdf5" + model_config["stat_file_path"] = os.path.join( + model_config["stat_file_dir"], model_config["stat_file"] + ) + model = get_model(model_config).to(env.DEVICE) + return ModelWrapper(model, self.loss) + + def get_data(self): + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator(iter(self.training_dataloader)) + batch_data = next(iter(self.training_data)) + input_dict = {} + for item in ["coord", "atype", "box"]: + if item in batch_data: + input_dict[item] = batch_data[item].to(env.DEVICE) + else: + input_dict[item] = None + label_dict = {} + for item in ["energy", "force", "virial"]: + if item in batch_data: + label_dict[item] = batch_data[item].to(env.DEVICE) + return input_dict, label_dict + + def test_saveload(self): + result1 = self.get_model_result() + result2 = self.get_model_result(read=True) + for item in result1: + np.testing.assert_allclose(result1[item].numpy(), result2[item].numpy()) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_trans_denoise.py b/source/tests/pd/model/test_trans_denoise.py new file mode 100644 index 0000000000..8317d4d2ae --- /dev/null +++ b/source/tests/pd/model/test_trans_denoise.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import 
copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation_denoise import ( + model_dpa1, + model_dpa2, + model_hybrid, +) + +dtype = paddle.float64 + + +class TransDenoiseTest: + def test( + self, + ): + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + shift = (paddle.rand([3], dtype=dtype) - 0.5).to(env.DEVICE) * 2.0 + coord_s = paddle.matmul( + paddle.remainder( + paddle.matmul(coord + shift, paddle.linalg.inv(cell)), 1.0 + ), + cell, + ) + updated_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + updated_c0 = updated_c0 - coord.unsqueeze(0) + ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} + updated_c1, logits1 = eval_model( + self.model, coord_s.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + updated_c1 = updated_c1 - coord_s.unsqueeze(0) + ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} + prec = 1e-10 + np.testing.assert_allclose( + ret0["updated_coord"].numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + np.testing.assert_allclose( + ret0["logits"].numpy(), ret1["logits"].numpy(), rtol=prec, atol=prec + ) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA1(unittest.TestCase, TransDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("support of the denoise is temporally 
disabled") +class TestDenoiseModelDPA2(unittest.TestCase, TransDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestDenoiseModelHybrid(unittest.TestCase, TransDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py index ffdce9bbb2..c3d65c09df 100644 --- a/source/tests/pd/test_training.py +++ b/source/tests/pd/test_training.py @@ -15,6 +15,9 @@ from deepmd.pd.entrypoints.main import ( get_trainer, ) +from deepmd.pd.utils.env import ( + enable_prim, +) from deepmd.pd.utils.finetune import ( get_finetune_rules, ) @@ -146,6 +149,9 @@ def setUp(self) -> None: self.config["model"] = deepcopy(model_se_e2_a) self.config["training"]["numb_steps"] = 1 self.config["training"]["save_freq"] = 1 + # import paddle + enable_prim(True) + # assert paddle.framework.core._is_eager_prim_enabled() def tearDown(self) -> None: DPTrainTest.tearDown(self) From 56e079c12f92bafd8514c529e80051c8efae1d72 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 19:46:03 +0800 Subject: [PATCH 48/58] update typos --- deepmd/pd/model/model/make_model.py | 1 - deepmd/pd/model/network/network.py | 4 +-- deepmd/pd/model/task/fitting.py | 2 +- source/tests/pd/model/test_descriptor_dpa1.py | 25 +++++++++---------- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index 8a9ee225e0..2b9a4b5bec 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -125,7 +125,6 @@ def enable_compression( check_frequency, ) - # cannot use the name forward. 
torch script does not work def forward_common( self, coord, diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index a135bcbb9f..1974e526a0 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -86,12 +86,12 @@ def get_full_embedding(self, device: str): Parameters ---------- - device : torch.device + device : str The device on which to perform the computation. Returns ------- - type_embedding : torch.Tensor + type_embedding : paddle.Tensor The full type embeddings of all types. The last index corresponds to the zero padding. Shape: (ntypes + 1) x tebd_dim """ diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index 1eed95645b..d9db44aff5 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -212,7 +212,7 @@ def __init__( self.register_buffer( "case_embd", paddle.zeros(self.dim_case_embd, dtype=self.prec, place=device), - # torch.eye(self.dim_case_embd, dtype=self.prec, device=device)[0], + # paddle.eye(self.dim_case_embd, dtype=self.prec, place=device)[0], ) else: self.case_embd = None diff --git a/source/tests/pd/model/test_descriptor_dpa1.py b/source/tests/pd/model/test_descriptor_dpa1.py index baf3117ffc..bfcf4ba6ee 100644 --- a/source/tests/pd/model/test_descriptor_dpa1.py +++ b/source/tests/pd/model/test_descriptor_dpa1.py @@ -64,16 +64,12 @@ def setUp(self): 6.575011420004332585e00, 6.825240650611076099e00, ] - self.coord = ( - paddle.to_tensor(coord, dtype=env.GLOBAL_PD_FLOAT_PRECISION) - .reshape([1, -1, 3]) - .to(device=env.DEVICE) - ) - self.atype = ( - paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32) - .reshape([1, -1]) - .to(device=env.DEVICE) - ) + self.coord = paddle.to_tensor( + coord, dtype=env.GLOBAL_PD_FLOAT_PRECISION, place=env.DEVICE + ).reshape([1, -1, 3]) + self.atype = paddle.to_tensor( + [0, 0, 0, 1, 1], dtype=paddle.int32, place=env.DEVICE + ).reshape([1, -1]) self.ref_d = paddle.to_tensor( [ 
8.382518544113587780e-03, @@ -238,7 +234,8 @@ def setUp(self): 1.518237240362583541e-03, ], dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ).to(device=env.DEVICE) + place=env.DEVICE, + ) with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: self.model_json = json.load(fp) self.file_model_param = Path(CUR_DIR) / "models" / "dpa1.pd" @@ -249,7 +246,8 @@ def test_descriptor_block(self) -> None: model_dpa1 = self.model_json dparams = model_dpa1["descriptor"] ntypes = len(model_dpa1["type_map"]) - assert "se_atten" == dparams.pop("type") + assert "se_atten" == dparams["type"] + dparams.pop("type") dparams["ntypes"] = ntypes des = DescrptBlockSeAtten( **dparams, @@ -305,7 +303,8 @@ def test_descriptor(self) -> None: ntypes = len(model_dpa2["type_map"]) dparams = model_dpa2["descriptor"] dparams["ntypes"] = ntypes - assert dparams.pop("type") == "se_atten" + assert dparams["type"] == "se_atten" + dparams.pop("type") dparams["concat_output_tebd"] = False dparams["use_tebd_bias"] = True des = DescrptDPA1( From 3d70e7c51879afd3b2ef8b3a491dfc84bb9b8773 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 20:02:12 +0800 Subject: [PATCH 49/58] update code --- deepmd/pd/train/training.py | 30 ++++++++++-------- deepmd/pd/utils/dataloader.py | 57 +++++++++++++++++++++++------------ 2 files changed, 55 insertions(+), 32 deletions(-) diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 6f1a39266c..33af073849 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -106,7 +106,7 @@ def __init__( shared_links=None, finetune_links=None, init_frz_model=None, - ): + ) -> None: """Construct a DeePMD trainer. 
Args: @@ -707,20 +707,17 @@ def step(_step_id, task_key="Default"): if self.gradient_max_norm > 0.0: with nvprof_context(enable_profiling, "Gradient clip"): - grad_norm = paddle.nn.utils.clip_grad_norm_( - self.wrapper.parameters(), self.gradient_max_norm + paddle.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), + self.gradient_max_norm, + error_if_nonfinite=True, ) - if not paddle.isfinite(grad_norm).all(): - # check local gradnorm single GPU case, trigger NanDetector - raise FloatingPointError("gradients are Nan/Inf") with nvprof_context(enable_profiling, "Adam update"): self.optimizer.step() self.scheduler.step() - if enable_profiling: - core.nvprof_nvtx_pop() else: raise ValueError(f"Not supported optimizer type '{self.opt_type}'") @@ -729,7 +726,7 @@ def step(_step_id, task_key="Default"): if self.display_in_training and ( display_step_id % self.disp_freq == 0 or display_step_id == 1 ): - self.wrapper.eval() + self.wrapper.eval() # Will set to train mode before fininshing validation def log_loss_train(_loss, _more_loss, _task_key="Default"): results = {} @@ -835,6 +832,7 @@ def log_loss_valid(_task_key="Default"): learning_rate=None, ) ) + self.wrapper.train() current_time = time.time() train_time = current_time - self.t0 @@ -891,9 +889,13 @@ def log_loss_valid(_task_key="Default"): writer.add_scalar(f"{task_key}/loss", loss, display_step_id) for item in more_loss: writer.add_scalar( - f"{task_key}/{item}", more_loss[item].item(), _step_id + f"{task_key}/{item}", more_loss[item].item(), display_step_id ) + if enable_profiling: + core.nvprof_nvtx_pop() + + self.wrapper.train() self.t0 = time.time() self.total_train_time = 0.0 for step_id in range(self.num_steps): @@ -989,7 +991,7 @@ def log_loss_valid(_task_key="Default"): "files, which can be viewd in NVIDIA Nsight Systems software" ) - def save_model(self, save_path, lr=0.0, step=0): + def save_model(self, save_path, lr=0.0, step=0) -> None: module = ( self.wrapper.module if dist.is_available() and 
dist.is_initialized() @@ -1085,7 +1087,7 @@ def get_data(self, is_train=True, task_key="Default"): log_dict["sid"] = batch_data["sid"] return input_dict, label_dict, log_dict - def print_header(self, fout, train_results, valid_results): + def print_header(self, fout, train_results, valid_results) -> None: train_keys = sorted(train_results.keys()) print_str = "" print_str += "# {:5s}".format("step") @@ -1116,7 +1118,9 @@ def print_header(self, fout, train_results, valid_results): fout.write(print_str) fout.flush() - def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results): + def print_on_training( + self, fout, step_id, cur_lr, train_results, valid_results + ) -> None: train_keys = sorted(train_results.keys()) print_str = "" print_str += f"{step_id:7d}" diff --git a/deepmd/pd/utils/dataloader.py b/deepmd/pd/utils/dataloader.py index 7a2bf4fe9c..9d59ea0da7 100644 --- a/deepmd/pd/utils/dataloader.py +++ b/deepmd/pd/utils/dataloader.py @@ -183,6 +183,7 @@ def __next__(self): return next(self.item) self.iters = [] + for item in self.dataloaders: self.iters.append(LazyIter(item)) @@ -196,7 +197,7 @@ def set_noise(self, noise_settings): for system in self.systems: system.set_noise(noise_settings) - def __len__(self): + def __len__(self) -> int: return len(self.dataloaders) def __getitem__(self, idx): @@ -219,19 +220,21 @@ def print_summary( name: str, prob: list[float], ): - print_summary( - name, - len(self.systems), - [ss.system for ss in self.systems], - [ss._natoms for ss in self.systems], - self.batch_sizes, - [ - ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) - for ii, ss in enumerate(self.systems) - ], - prob, - [ss._data_system.pbc for ss in self.systems], - ) + rank = dist.get_rank() if dist.is_initialized() else 0 + if rank == 0: + print_summary( + name, + len(self.systems), + [ss.system for ss in self.systems], + [ss._natoms for ss in self.systems], + self.batch_sizes, + [ + 
ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) + for ii, ss in enumerate(self.systems) + ], + prob, + [ss._data_system.pbc for ss in self.systems], + ) _sentinel = object() @@ -239,13 +242,13 @@ def print_summary( class BackgroundConsumer(Thread): - def __init__(self, queue, source, max_len): + def __init__(self, queue, source, max_len) -> None: Thread.__init__(self) self._queue = queue self._source = source # Main DL iterator self._max_len = max_len # - def run(self): + def run(self) -> None: for item in self._source: self._queue.put(item) # Blocking if the queue is full @@ -254,7 +257,7 @@ def run(self): class BufferedIterator: - def __init__(self, iterable): + def __init__(self, iterable) -> None: self._queue = queue.Queue(QUEUESIZE) self._iterable = iterable self._consumer = None @@ -263,7 +266,7 @@ def __init__(self, iterable): self.warning_time = None self.total = len(iterable) - def _create_consumer(self): + def _create_consumer(self) -> None: self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total) self._consumer.daemon = True self._consumer.start() @@ -271,7 +274,7 @@ def _create_consumer(self): def __iter__(self): return self - def __len__(self): + def __len__(self) -> int: return self.total def __next__(self): @@ -337,3 +340,19 @@ def get_weighted_sampler(training_data, prob_style, sys_prob=False): len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1) sampler = WeightedRandomSampler(probs, len_sampler, replacement=True) return sampler + + +def get_sampler_from_params(_data, _params): + if ( + "sys_probs" in _params and _params["sys_probs"] is not None + ): # use sys_probs first + _sampler = get_weighted_sampler( + _data, + _params["sys_probs"], + sys_prob=True, + ) + elif "auto_prob" in _params: + _sampler = get_weighted_sampler(_data, _params["auto_prob"]) + else: + _sampler = get_weighted_sampler(_data, "prob_sys_size") + return _sampler From 8b5b4a8fd8b92c715ae35308b5e1f9cbd05980bc Mon Sep 17 00:00:00 2001 
From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 21:21:01 +0800 Subject: [PATCH 50/58] fix coverage --- deepmd/pd/entrypoints/main.py | 18 +++++-- deepmd/pd/train/training.py | 60 +++++++++++++++-------- deepmd/pd/train/wrapper.py | 67 ++------------------------ source/tests/pd/model/test_autodiff.py | 2 - 4 files changed, 55 insertions(+), 92 deletions(-) diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index 19653d6ea7..3fa66312e7 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -230,7 +230,7 @@ def train( use_pretrain_script: bool = False, force_load: bool = False, output: str = "out.json", -): +) -> None: log.info("Configuration path: %s", input_file) SummaryPrinter()() with open(input_file) as fin: @@ -321,10 +321,18 @@ def train( # save min_nbor_dist if min_nbor_dist is not None: if not multi_task: - trainer.model.min_nbor_dist = min_nbor_dist + trainer.model.min_nbor_dist = paddle.to_tensor( + min_nbor_dist, + dtype=paddle.float64, + place=DEVICE, + ) else: for model_item in min_nbor_dist: - trainer.model[model_item].min_nbor_dist = min_nbor_dist[model_item] + trainer.model[model_item].min_nbor_dist = paddle.to_tensor( + min_nbor_dist[model_item], + dtype=paddle.float64, + place=DEVICE, + ) trainer.run() @@ -332,7 +340,7 @@ def freeze( model: str, output: str = "frozen_model.json", head: Optional[str] = None, -): +) -> None: paddle.set_flags( { "FLAGS_save_cf_stack_op": 1, @@ -383,7 +391,7 @@ def change_bias( numb_batch: int = 0, model_branch: Optional[str] = None, output: Optional[str] = None, -): +) -> None: if input_file.endswith(".pd"): old_state_dict = paddle.load(input_file) model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 33af073849..65e35a1c4b 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -50,7 +50,7 @@ ) from deepmd.pd.utils.dataloader 
import ( BufferedIterator, - get_weighted_sampler, + get_sampler_from_params, ) from deepmd.pd.utils.env import ( DEVICE, @@ -168,19 +168,7 @@ def get_opt_param(params): def get_data_loader(_training_data, _validation_data, _training_params): def get_dataloader_and_buffer(_data, _params): - if "auto_prob" in _training_params["training_data"]: - _sampler = get_weighted_sampler( - _data, _params["training_data"]["auto_prob"] - ) - elif "sys_probs" in _training_params["training_data"]: - _sampler = get_weighted_sampler( - _data, - _params["training_data"]["sys_probs"], - sys_prob=True, - ) - else: - _sampler = get_weighted_sampler(_data, "prob_sys_size") - + _sampler = get_sampler_from_params(_data, _params) if _sampler is None: log.warning( "Sampler not specified!" @@ -201,14 +189,16 @@ def get_dataloader_and_buffer(_data, _params): return _dataloader, _data_buffered training_dataloader, training_data_buffered = get_dataloader_and_buffer( - _training_data, _training_params + _training_data, _training_params["training_data"] ) if _validation_data is not None: ( validation_dataloader, validation_data_buffered, - ) = get_dataloader_and_buffer(_validation_data, _training_params) + ) = get_dataloader_and_buffer( + _validation_data, _training_params["validation_data"] + ) valid_numb_batch = _training_params["validation_data"].get( "numb_btch", 1 ) @@ -283,7 +273,7 @@ def get_lr(lr_params): self.opt_type, self.opt_param = get_opt_param(training_params) # Model - self.model = get_model_for_wrapper(model_params) + self.model = get_model_for_wrapper(model_params, resuming=resuming) # Loss if not self.multi_task: @@ -495,7 +485,7 @@ def collect_single_finetune_params( _new_state_dict, _origin_state_dict, _random_state_dict, - ): + ) -> None: _new_fitting = _finetune_rule_single.get_random_fitting() _model_key_from = _finetune_rule_single.get_model_branch() target_keys = [ @@ -668,7 +658,7 @@ def run(self): core.nvprof_start() core.nvprof_enable_record_event() - def 
step(_step_id, task_key="Default"): + def step(_step_id, task_key="Default") -> None: # Paddle Profiler if enable_profiling: core.nvprof_nvtx_push(f"Training step {_step_id}") @@ -886,7 +876,7 @@ def log_loss_valid(_task_key="Default"): display_step_id % self.tensorboard_freq == 0 or display_step_id == 1 ): writer.add_scalar(f"{task_key}/lr", cur_lr, display_step_id) - writer.add_scalar(f"{task_key}/loss", loss, display_step_id) + writer.add_scalar(f"{task_key}/loss", loss.item(), display_step_id) for item in more_loss: writer.add_scalar( f"{task_key}/{item}", more_loss[item].item(), display_step_id @@ -1195,7 +1185,7 @@ def get_single_model( return model -def get_model_for_wrapper(_model_params): +def get_model_for_wrapper(_model_params, resuming=False): if "model_dict" not in _model_params: _model = get_single_model( _model_params, @@ -1203,13 +1193,41 @@ def get_model_for_wrapper(_model_params): else: _model = {} model_keys = list(_model_params["model_dict"]) + do_case_embd, case_embd_index = get_case_embd_config(_model_params) for _model_key in model_keys: _model[_model_key] = get_single_model( _model_params["model_dict"][_model_key], ) + if do_case_embd and not resuming: + # only set case_embd when from scratch multitask training + _model[_model_key].set_case_embd(case_embd_index[_model_key]) return _model +def get_case_embd_config(_model_params): + assert ( + "model_dict" in _model_params + ), "Only support setting case embedding for multi-task model!" 
+ model_keys = list(_model_params["model_dict"]) + sorted_model_keys = sorted(model_keys) + numb_case_embd_list = [ + _model_params["model_dict"][model_key] + .get("fitting_net", {}) + .get("dim_case_embd", 0) + for model_key in sorted_model_keys + ] + if not all(item == numb_case_embd_list[0] for item in numb_case_embd_list): + raise ValueError( + f"All models must have the same dimension of case embedding, while the settings are: {numb_case_embd_list}" + ) + if numb_case_embd_list[0] == 0: + return False, {} + case_embd_index = { + model_key: idx for idx, model_key in enumerate(sorted_model_keys) + } + return True, case_embd_index + + def model_change_out_bias( _model, _sample_func, diff --git a/deepmd/pd/train/wrapper.py b/deepmd/pd/train/wrapper.py index c3643f8372..dcdd1f7dd4 100644 --- a/deepmd/pd/train/wrapper.py +++ b/deepmd/pd/train/wrapper.py @@ -26,7 +26,7 @@ def __init__( loss: paddle.nn.Layer | dict = None, model_params=None, shared_links=None, - ): + ) -> None: """Construct a DeePMD model wrapper. Args: @@ -64,74 +64,13 @@ def __init__( self.loss[task_key] = loss[task_key] self.inference_only = self.loss is None - def share_params(self, shared_links, resume=False): + def share_params(self, shared_links, resume=False) -> None: """ Share the parameters of classes following rules defined in shared_links during multitask training. If not start from checkpoint (resume is False), some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. 
""" - supported_types = ["descriptor", "fitting_net"] - for shared_item in shared_links: - class_name = shared_links[shared_item]["type"] - shared_base = shared_links[shared_item]["links"][0] - class_type_base = shared_base["shared_type"] - model_key_base = shared_base["model_key"] - shared_level_base = shared_base["shared_level"] - if "descriptor" in class_type_base: - if class_type_base == "descriptor": - base_class = self.model[model_key_base].get_descriptor() - elif "hybrid" in class_type_base: - raise NotImplementedError( - "Hybrid descriptor is not implemented yet" - ) - else: - raise RuntimeError(f"Unknown class_type {class_type_base}!") - for link_item in shared_links[shared_item]["links"][1:]: - class_type_link = link_item["shared_type"] - model_key_link = link_item["model_key"] - shared_level_link = int(link_item["shared_level"]) - assert ( - shared_level_link >= shared_level_base - ), "The shared_links must be sorted by shared_level!" - assert ( - "descriptor" in class_type_link - ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" - if class_type_link == "descriptor": - link_class = self.model[model_key_link].get_descriptor() - elif "hybrid" in class_type_link: - raise NotImplementedError( - "Hybrid descriptor is not implemented yet" - ) - else: - raise RuntimeError(f"Unknown class_type {class_type_link}!") - link_class.share_params( - base_class, shared_level_link, resume=resume - ) - log.warning( - f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" - ) - else: - if hasattr(self.model[model_key_base], class_type_base): - base_class = self.model[model_key_base].__getattr__(class_type_base) - for link_item in shared_links[shared_item]["links"][1:]: - class_type_link = link_item["shared_type"] - model_key_link = link_item["model_key"] - shared_level_link = int(link_item["shared_level"]) - assert ( - shared_level_link >= shared_level_base - ), "The shared_links must be sorted by shared_level!" 
- assert ( - class_type_base == class_type_link - ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" - link_class = self.model[model_key_link].__getattr__( - class_type_link - ) - link_class.share_params( - base_class, shared_level_link, resume=resume - ) - log.warning( - f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" - ) + raise NotImplementedError("share_params is not implemented yet") def forward( self, diff --git a/source/tests/pd/model/test_autodiff.py b/source/tests/pd/model/test_autodiff.py index a056491fb3..1bd9dd0d0f 100644 --- a/source/tests/pd/model/test_autodiff.py +++ b/source/tests/pd/model/test_autodiff.py @@ -190,7 +190,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) @@ -198,7 +197,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) From e74d27274e6297a34cb79b22844ca53f31df5ece Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 30 Nov 2024 15:13:46 +0800 Subject: [PATCH 51/58] update consistent check of dpa1 --- .../tests/consistent/descriptor/test_dpa1.py | 44 +++++++++++++++++++ source/tests/consistent/model/test_dpa1.py | 28 ++++++++++++ 2 files changed, 72 insertions(+) diff --git a/source/tests/consistent/descriptor/test_dpa1.py b/source/tests/consistent/descriptor/test_dpa1.py index 8be219f5ea..92b2c6bd0b 100644 --- a/source/tests/consistent/descriptor/test_dpa1.py +++ b/source/tests/consistent/descriptor/test_dpa1.py @@ -18,6 +18,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -39,6 +40,10 @@ 
from deepmd.jax.descriptor.dpa1 import DescrptDPA1 as DescriptorDPA1JAX else: DescriptorDPA1JAX = None +if INSTALLED_PD: + from deepmd.pd.model.descriptor.dpa1 import DescrptDPA1 as DescrptDPA1PD +else: + DescrptDPA1PD = None if INSTALLED_ARRAY_API_STRICT: from ...array_api_strict.descriptor.dpa1 import DescrptDPA1 as DescriptorDPA1Strict else: @@ -187,6 +192,34 @@ def skip_dp(self) -> bool: temperature, ) + @property + def skip_pd(self) -> bool: + ( + tebd_dim, + tebd_input_mode, + resnet_dt, + type_one_side, + attn, + attn_layer, + attn_dotr, + excluded_types, + env_protection, + set_davg_zero, + scaling_factor, + normalize, + temperature, + ln_eps, + smooth_type_embedding, + concat_output_tebd, + precision, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return not INSTALLED_PD or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + temperature, + ) + @property def skip_jax(self) -> bool: ( @@ -287,6 +320,7 @@ def skip_tf(self) -> bool: tf_class = DescrptDPA1TF dp_class = DescrptDPA1DP pt_class = DescrptDPA1PT + pd_class = DescrptDPA1PD jax_class = DescriptorDPA1JAX array_api_strict_class = DescriptorDPA1Strict @@ -387,6 +421,16 @@ def eval_jax(self, jax_obj: Any) -> Any: mixed_types=True, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: return self.eval_array_api_strict_descriptor( array_api_strict_obj, diff --git a/source/tests/consistent/model/test_dpa1.py b/source/tests/consistent/model/test_dpa1.py index 774c624ac7..8b8fab7ae1 100644 --- a/source/tests/consistent/model/test_dpa1.py +++ b/source/tests/consistent/model/test_dpa1.py @@ -14,6 +14,7 @@ from ..common import ( INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, SKIP_FLAG, @@ -37,6 +38,11 @@ model_args, ) +if INSTALLED_PD: + from deepmd.pd.model.model import get_model as get_model_pd + from 
deepmd.pd.model.model.ener_model import EnergyModel as EnergyModelPD +else: + EnergyModelPD = None if INSTALLED_JAX: from deepmd.jax.model.ener_model import EnergyModel as EnergyModelJAX from deepmd.jax.model.model import get_model as get_model_jax @@ -90,6 +96,7 @@ def data(self) -> dict: tf_class = EnergyModelTF dp_class = EnergyModelDP pt_class = EnergyModelPT + pd_class = EnergyModelPD jax_class = EnergyModelJAX args = model_args() @@ -102,6 +109,8 @@ def get_reference_backend(self): return self.RefBackend.PT if not self.skip_tf: return self.RefBackend.TF + if not self.skip_pd: + return self.RefBackend.PD if not self.skip_jax: return self.RefBackend.JAX if not self.skip_dp: @@ -119,6 +128,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_dp(data) elif cls is EnergyModelPT: return get_model_pt(data) + elif cls is EnergyModelPD: + return get_model_pd(data) elif cls is EnergyModelJAX: return get_model_jax(data) return cls(**data, **self.additional_data) @@ -190,6 +201,15 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_model( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def eval_jax(self, jax_obj: Any) -> Any: return self.eval_jax_model( jax_obj, @@ -225,6 +245,14 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret[3].ravel(), ret[4].ravel(), ) + elif backend is self.RefBackend.PD: + return ( + ret["energy"].flatten(), + ret["atom_energy"].flatten(), + ret["force"].flatten(), + ret["virial"].flatten(), + ret["atom_virial"].flatten(), + ) elif backend is self.RefBackend.JAX: return ( ret["energy_redu"].ravel(), From 4520e9755589f157f3ac15697e81c0bf9b999145 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 1 Dec 2024 23:02:59 +0800 Subject: [PATCH 52/58] add unittests code of dpa2 and replace several decomp. API to paddle. 
API --- deepmd/pd/model/descriptor/dpa2.py | 105 ++++-- deepmd/pd/model/descriptor/repformer_layer.py | 2 +- deepmd/pd/model/descriptor/repformers.py | 122 ++++--- deepmd/pd/model/descriptor/se_t_tebd.py | 7 +- deepmd/pd/model/network/network.py | 13 +- .../tests/consistent/descriptor/test_dpa2.py | 51 +++ source/tests/pd/model/models/dpa2.json | 57 +++ source/tests/pd/model/models/dpa2.pd | Bin 0 -> 119535 bytes source/tests/pd/model/test_autodiff.py | 43 ++- source/tests/pd/model/test_descriptor_dpa2.py | 207 +++++++++++ source/tests/pd/model/test_dpa2.py | 333 ++++++++++++++++++ source/tests/pd/model/test_forward_lower.py | 15 +- source/tests/pd/model/test_null_input.py | 12 +- source/tests/pd/model/test_permutation.py | 1 - source/tests/pd/model/test_rot.py | 1 - source/tests/pd/model/test_rot_denoise.py | 11 +- source/tests/pd/model/test_smooth.py | 31 ++ source/tests/pd/model/test_trans.py | 1 - source/tests/pd/model/test_unused_params.py | 92 +++++ source/tests/pd/test_finetune.py | 15 +- source/tests/pd/test_multitask.py | 40 +++ source/tests/pd/test_training.py | 19 +- source/tests/pd/test_update_sel.py | 62 +++- 23 files changed, 1077 insertions(+), 163 deletions(-) create mode 100644 source/tests/pd/model/models/dpa2.json create mode 100644 source/tests/pd/model/models/dpa2.pd create mode 100644 source/tests/pd/model/test_descriptor_dpa2.py create mode 100644 source/tests/pd/model/test_dpa2.py create mode 100644 source/tests/pd/model/test_unused_params.py diff --git a/deepmd/pd/model/descriptor/dpa2.py b/deepmd/pd/model/descriptor/dpa2.py index 8fbffe2d90..e0ec0a501d 100644 --- a/deepmd/pd/model/descriptor/dpa2.py +++ b/deepmd/pd/model/descriptor/dpa2.py @@ -25,9 +25,11 @@ TypeEmbedNetConsistent, ) from deepmd.pd.utils import ( - decomp, env, ) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) from deepmd.pd.utils.nlist import ( build_multiple_neighbor_list, get_multiple_nlist_key, @@ -93,7 +95,7 @@ def __init__( use_econf_tebd: bool = False, 
use_tebd_bias: bool = False, type_map: Optional[list[str]] = None, - ): + ) -> None: r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. Parameters @@ -156,6 +158,7 @@ def init_subclass_params(sub_data, sub_class): self.repinit_args = init_subclass_params(repinit, RepinitArgs) self.repformer_args = init_subclass_params(repformer, RepformerArgs) + self.tebd_input_mode = self.repinit_args.tebd_input_mode self.repinit = DescrptBlockSeAtten( self.repinit_args.rcut, @@ -265,6 +268,7 @@ def init_subclass_params(sub_data, sub_class): ) self.concat_output_tebd = concat_output_tebd self.precision = precision + self.prec = PRECISION_DICT[self.precision] self.smooth = smooth self.exclude_types = exclude_types self.env_protection = env_protection @@ -307,6 +311,7 @@ def init_subclass_params(sub_data, sub_class): # set trainable for param in self.parameters(): param.stop_gradient = not trainable + self.compress = False def get_rcut(self) -> float: """Returns the cut-off radius.""" @@ -344,11 +349,11 @@ def get_dim_emb(self) -> int: return self.repformers.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -370,11 +375,11 @@ def get_env_protection(self) -> float: # the env_protection of repinit is the same as that of the repformer return self.repinit.get_env_protection() - def share_params(self, base_class, shared_level, resume=False): + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. 
mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -387,33 +392,18 @@ def share_params(self, base_class, shared_level, resume=False): "type_embedding" ] self.repinit.share_params(base_class.repinit, 0, resume=resume) + if self.use_three_body: + self.repinit_three_body.share_params( + base_class.repinit_three_body, 0, resume=resume + ) self._sub_layers["g1_shape_tranform"] = base_class._sub_layers[ "g1_shape_tranform" ] self.repformers.share_params(base_class.repformers, 0, resume=resume) # shared_level: 1 - # share all parameters in type_embedding and repinit - elif shared_level == 1: - self._sub_layers["type_embedding"] = base_class._sub_layers[ - "type_embedding" - ] - self.repinit.share_params(base_class.repinit, 0, resume=resume) - # shared_level: 2 - # share all parameters in type_embedding and repformers - elif shared_level == 2: - self._sub_layers["type_embedding"] = base_class._sub_layers[ - "type_embedding" - ] - self._sub_layers["g1_shape_tranform"] = base_class._sub_layers[ - "g1_shape_tranform" - ] - self.repformers.share_params(base_class.repformers, 0, resume=resume) - # shared_level: 3 # share all parameters in type_embedding - elif shared_level == 3: - self._sub_layers["type_embedding"] = base_class._sub_layers[ - "type_embedding" - ] + elif shared_level == 1: + self._modules["type_embedding"] = base_class._modules["type_embedding"] # Other shared levels else: raise NotImplementedError @@ -486,7 +476,7 @@ def compute_input_stats( self, merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, - ): + ) -> None: """ Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. @@ -743,12 +733,15 @@ def forward( The smooth switch function. 
shape: nf x nloc x nnei """ + # cast the input to internal precsion + extended_coord = extended_coord.to(dtype=self.prec) + use_three_body = self.use_three_body nframes, nloc, nnei = nlist.shape nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 # nlists nlist_dict = build_multiple_neighbor_list( - extended_coord, + extended_coord.detach(), nlist, self.rcut_list, self.nsel_list, @@ -756,6 +749,10 @@ def forward( # repinit g1_ext = self.type_embedding(extended_atype) g1_inp = g1_ext[:, :nloc, :] + if self.tebd_input_mode in ["strip"]: + type_embedding = self.type_embedding.get_full_embedding(g1_ext.place) + else: + type_embedding = None g1, _, _, _, _ = self.repinit( nlist_dict[ get_multiple_nlist_key(self.repinit.get_rcut(), self.repinit.get_nsel()) @@ -764,6 +761,7 @@ def forward( extended_atype, g1_ext, mapping, + type_embedding, ) if use_three_body: assert self.repinit_three_body is not None @@ -778,6 +776,7 @@ def forward( extended_atype, g1_ext, mapping, + type_embedding, ) g1 = paddle.concat([g1, g1_three_body], axis=-1) # linear to change shape @@ -793,7 +792,7 @@ def forward( .unsqueeze(-1) .expand([-1, -1, g1.shape[-1]]) ) - g1_ext = decomp.take_along_axis(g1, mapping_ext, 1) + g1_ext = paddle.take_along_axis(g1, mapping_ext, 1) g1 = g1_ext # repformer g1, g2, h2, rot_mat, sw = self.repformers( @@ -806,11 +805,17 @@ def forward( extended_atype, g1, mapping, - comm_dict, + comm_dict=comm_dict, ) if self.concat_output_tebd: g1 = paddle.concat([g1, g1_inp], axis=-1) - return g1, rot_mat, g2, h2, sw + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + g2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + h2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) @classmethod def update_sel( @@ -824,7 +829,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : 
list[str], optional The name of each type of atoms local_jdata : dict @@ -847,6 +852,14 @@ def update_sel( True, ) local_jdata_cpy["repinit"]["nsel"] = repinit_sel[0] + min_nbor_dist, repinit_three_body_sel = update_sel.update_one_sel( + train_data, + type_map, + local_jdata_cpy["repinit"]["three_body_rcut"], + local_jdata_cpy["repinit"]["three_body_sel"], + True, + ) + local_jdata_cpy["repinit"]["three_body_sel"] = repinit_three_body_sel[0] min_nbor_dist, repformer_sel = update_sel.update_one_sel( train_data, type_map, @@ -856,3 +869,29 @@ def update_sel( ) local_jdata_cpy["repformer"]["nsel"] = repformer_sel[0] return local_jdata_cpy, min_nbor_dist + + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + # do some checks before the mocel compression process + raise NotImplementedError("enable_compression is not implemented yet") diff --git a/deepmd/pd/model/descriptor/repformer_layer.py b/deepmd/pd/model/descriptor/repformer_layer.py index 816e16b05d..1243437298 100644 --- a/deepmd/pd/model/descriptor/repformer_layer.py +++ b/deepmd/pd/model/descriptor/repformer_layer.py @@ -110,7 +110,7 @@ def _make_nei_g1( # index: nb x (nloc x nnei) x ng1 index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, ng1]) # gg1 : nb x (nloc x nnei) x ng1 - gg1 = decomp.take_along_axis(g1_ext, axis=1, indices=index) + gg1 = paddle.take_along_axis(g1_ext, axis=1, indices=index) # gg1 : nb x nloc x nnei x ng1 gg1 = gg1.reshape([nb, nloc, 
nnei, ng1]) return gg1 diff --git a/deepmd/pd/model/descriptor/repformers.py b/deepmd/pd/model/descriptor/repformers.py index 9340f60067..47d92317df 100644 --- a/deepmd/pd/model/descriptor/repformers.py +++ b/deepmd/pd/model/descriptor/repformers.py @@ -20,7 +20,6 @@ MLPLayer, ) from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.env import ( @@ -32,9 +31,6 @@ from deepmd.pd.utils.exclude_mask import ( PairExcludeMask, ) -from deepmd.pd.utils.spin import ( - concat_switch_virtual, -) from deepmd.pd.utils.utils import ( ActivationFn, ) @@ -419,7 +415,7 @@ def forward( g2, h2 = paddle.split(dmatrix, [1, 3], axis=-1) else: # g2, h2 = paddle.linalg.norm(diff, axis=-1, keepdim=True), diff - g2, h2 = decomp.norm(diff, axis=-1, keepdim=True), diff + g2, h2 = paddle.linalg.norm(diff, axis=-1, keepdim=True), diff g2 = g2 / self.rcut h2 = h2 / self.rcut # nb x nloc x nnei x ng2 @@ -441,65 +437,65 @@ def forward( # g1_ext: nb x nall x ng1 if comm_dict is None: assert mapping is not None - g1_ext = decomp.take_along_axis(g1, axis=1, indices=mapping) + g1_ext = paddle.take_along_axis(g1, axis=1, indices=mapping) else: - raise NotImplementedError("Not impl yet") - has_spin = "has_spin" in comm_dict - if not has_spin: - n_padding = nall - nloc - g1 = paddle.nn.functional.pad( - g1.squeeze(0), (0, 0, 0, n_padding), value=0.0 - ) - real_nloc = nloc - real_nall = nall - else: - # for spin - real_nloc = nloc // 2 - real_nall = nall // 2 - real_n_padding = real_nall - real_nloc - g1_real, g1_virtual = paddle.split( - g1, [real_nloc, real_nloc], axis=1 - ) - # mix_g1: nb x real_nloc x (ng1 * 2) - mix_g1 = paddle.concat([g1_real, g1_virtual], axis=2) - # nb x real_nall x (ng1 * 2) - g1 = paddle.nn.functional.pad( - mix_g1.squeeze(0), (0, 0, 0, real_n_padding), value=0.0 - ) - - assert "send_list" in comm_dict - assert "send_proc" in comm_dict - assert "recv_proc" in comm_dict - assert "send_num" in comm_dict - assert "recv_num" in comm_dict - assert "communicator" in 
comm_dict - ret = paddle.ops.deepmd.border_op( - comm_dict["send_list"], - comm_dict["send_proc"], - comm_dict["recv_proc"], - comm_dict["send_num"], - comm_dict["recv_num"], - g1, - comm_dict["communicator"], - paddle.to_tensor( - real_nloc, - dtype=paddle.int32, - place=env.DEVICE, - ), # should be int of c++ - paddle.to_tensor( - real_nall - real_nloc, - dtype=paddle.int32, - place=env.DEVICE, - ), # should be int of c++ - ) - g1_ext = ret[0].unsqueeze(0) - if has_spin: - g1_real_ext, g1_virtual_ext = paddle.split( - g1_ext, [ng1, ng1], axis=2 - ) - g1_ext = concat_switch_virtual( - g1_real_ext, g1_virtual_ext, real_nloc - ) + raise NotImplementedError("Not implemented yet") + # has_spin = "has_spin" in comm_dict + # if not has_spin: + # n_padding = nall - nloc + # g1 = paddle.nn.functional.pad( + # g1.squeeze(0), (0, 0, 0, n_padding), value=0.0 + # ) + # real_nloc = nloc + # real_nall = nall + # else: + # # for spin + # real_nloc = nloc // 2 + # real_nall = nall // 2 + # real_n_padding = real_nall - real_nloc + # g1_real, g1_virtual = paddle.split( + # g1, [real_nloc, real_nloc], axis=1 + # ) + # # mix_g1: nb x real_nloc x (ng1 * 2) + # mix_g1 = paddle.concat([g1_real, g1_virtual], axis=2) + # # nb x real_nall x (ng1 * 2) + # g1 = paddle.nn.functional.pad( + # mix_g1.squeeze(0), (0, 0, 0, real_n_padding), value=0.0 + # ) + + # assert "send_list" in comm_dict + # assert "send_proc" in comm_dict + # assert "recv_proc" in comm_dict + # assert "send_num" in comm_dict + # assert "recv_num" in comm_dict + # assert "communicator" in comm_dict + # ret = paddle.ops.deepmd.border_op( + # comm_dict["send_list"], + # comm_dict["send_proc"], + # comm_dict["recv_proc"], + # comm_dict["send_num"], + # comm_dict["recv_num"], + # g1, + # comm_dict["communicator"], + # paddle.to_tensor( + # real_nloc, + # dtype=paddle.int32, + # place=env.DEVICE, + # ), # should be int of c++ + # paddle.to_tensor( + # real_nall - real_nloc, + # dtype=paddle.int32, + # place=env.DEVICE, + # ), # 
should be int of c++ + # ) + # g1_ext = ret[0].unsqueeze(0) + # if has_spin: + # g1_real_ext, g1_virtual_ext = paddle.split( + # g1_ext, [ng1, ng1], axis=2 + # ) + # g1_ext = concat_switch_virtual( + # g1_real_ext, g1_virtual_ext, real_nloc + # ) g1, g2, h2 = ll.forward( g1_ext, g2, diff --git a/deepmd/pd/model/descriptor/se_t_tebd.py b/deepmd/pd/model/descriptor/se_t_tebd.py index 31fb06045e..31cf352d33 100644 --- a/deepmd/pd/model/descriptor/se_t_tebd.py +++ b/deepmd/pd/model/descriptor/se_t_tebd.py @@ -26,7 +26,6 @@ TypeEmbedNetConsistent, ) from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.env import ( @@ -834,7 +833,7 @@ def forward( index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) # nb x (nloc x nnei) x nt # atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, index=index) - atype_tebd_nlist = decomp.take_along_axis( + atype_tebd_nlist = paddle.take_along_axis( atype_tebd_ext, axis=1, indices=index ) # nb x nloc x nnei x nt @@ -858,7 +857,7 @@ def forward( # nf x (nl x nnei) nlist_index = nlist.reshape([nb, nloc * nnei]) # nf x (nl x nnei) - nei_type = decomp.take_along_axis( + nei_type = paddle.take_along_axis( extended_atype, indices=nlist_index, axis=1 ) # nfnl x nnei @@ -892,7 +891,7 @@ def forward( ).reshape(-1, nt * 2) tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) # (nfnl x nt_i x nt_j) x ng - gg_t = decomp.take_along_axis(tt_full, indices=idx, axis=0) + gg_t = paddle.take_along_axis(tt_full, indices=idx, axis=0) # (nfnl x nt_i x nt_j) x ng gg_t = gg_t.reshape(nfnl, nnei, nnei, ng) if self.smooth: diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index 1974e526a0..49b3dc975f 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -80,20 +80,23 @@ def forward(self, atype): """ return self.embedding(atype.place)[atype] - def get_full_embedding(self, device: str): + def get_full_embedding(self, device: 
Union[str, paddle.base.libpaddle.Place]): """ - Get the type embeddings of all types. + Retrieve the type embeddings for all types. Parameters ---------- - device : str + device : Union[str, paddle.base.libpaddle.Place] The device on which to perform the computation. + It can be specified as: + - str: "cpu", "gpu", "gpu:0", etc., indicating the device type. + - paddle.base.libpaddle.Place: an device of paddle Tensor. Returns ------- type_embedding : paddle.Tensor - The full type embeddings of all types. The last index corresponds to the zero padding. - Shape: (ntypes + 1) x tebd_dim + The complete set of type embeddings, including a zero-padding entry at the last index. + Shape: (ntypes + 1, tebd_dim) """ return self.embedding(device) diff --git a/source/tests/consistent/descriptor/test_dpa2.py b/source/tests/consistent/descriptor/test_dpa2.py index 72c0967a78..ef840bf9d7 100644 --- a/source/tests/consistent/descriptor/test_dpa2.py +++ b/source/tests/consistent/descriptor/test_dpa2.py @@ -17,6 +17,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, CommonTest, parameterized, @@ -34,6 +35,12 @@ from deepmd.jax.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2JAX else: DescrptDPA2JAX = None + +if INSTALLED_PD: + from deepmd.pd.model.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2PD +else: + DescrptDPA2PD = None + if INSTALLED_ARRAY_API_STRICT: from ...array_api_strict.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2Strict else: @@ -214,6 +221,39 @@ def skip_pt(self) -> bool: ) = self.param return CommonTest.skip_pt + @property + def skip_pd(self) -> bool: + ( + repinit_tebd_input_mode, + repinit_set_davg_zero, + repinit_type_one_side, + repinit_use_three_body, + repformer_update_g1_has_conv, + repformer_direct_dist, + repformer_update_g1_has_drrd, + repformer_update_g1_has_grrg, + repformer_update_g1_has_attn, + repformer_update_g2_has_g1g1, + repformer_update_g2_has_attn, + repformer_update_h2, + 
repformer_attn2_has_gate, + repformer_update_style, + repformer_update_residual_init, + repformer_set_davg_zero, + repformer_trainable_ln, + repformer_ln_eps, + repformer_use_sqrt_nnei, + repformer_g1_out_conv, + repformer_g1_out_mlp, + smooth, + exclude_types, + precision, + add_tebd_to_repinit_out, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return not INSTALLED_PD or precision == "bfloat16" + @property def skip_dp(self) -> bool: ( @@ -286,6 +326,7 @@ def skip_tf(self) -> bool: tf_class = DescrptDPA2TF dp_class = DescrptDPA2DP pt_class = DescrptDPA2PT + pd_class = DescrptDPA2PD jax_class = DescrptDPA2JAX array_api_strict_class = DescrptDPA2Strict args = descrpt_dpa2_args().append(Argument("ntypes", int, optional=False)) @@ -383,6 +424,16 @@ def eval_pt(self, pt_obj: Any) -> Any: mixed_types=True, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def eval_jax(self, jax_obj: Any) -> Any: return self.eval_jax_descriptor( jax_obj, diff --git a/source/tests/pd/model/models/dpa2.json b/source/tests/pd/model/models/dpa2.json new file mode 100644 index 0000000000..f83e319de3 --- /dev/null +++ b/source/tests/pd/model/models/dpa2.json @@ -0,0 +1,57 @@ +{ + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 2.0, + "nsel": 30, + "neuron": [ + 2, + 4, + 8 + ], + "axis_neuron": 4, + "activation_function": "tanh" + + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 10, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": false, + "update_g1_has_conv": true, + "update_g1_has_grrg": true, + "update_g1_has_drrd": true, + "update_g1_has_attn": true, + "update_g2_has_g1g1": true, + "update_g2_has_attn": true, + "attn2_has_gate": true, + "use_sqrt_nnei": false, + 
"g1_out_conv": false, + "g1_out_mlp": false + }, + "seed": 1, + "add_tebd_to_repinit_out": false + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1 + } +} diff --git a/source/tests/pd/model/models/dpa2.pd b/source/tests/pd/model/models/dpa2.pd new file mode 100644 index 0000000000000000000000000000000000000000..650f0c144e7c2dae9a265e61ed73d4eb29fa6dc7 GIT binary patch literal 119535 zcmeFZXH=Ehwk=8!5ky4=5m8YMLK~WIis&d!a`@C0GyY_wep8Kcxv22a@;rwPFV~)^ApUZlK4;cw5$uB=oy*XWI zj5UmO40KGn^fgosyj|!G%=C?{xb7PoYjEkC>6z-N8XK!xdArak8*AJ*G%ztWHoI@? z?d9!4@#}-$E|do9KOgb(mLPHP79&;jcJ!9<=9D0nAd}cHvBA+>o5F=c-PFoRUl91)5B~Op zzy09vbMSW@_&*&7e$CoEzh!Mr9X(SGV`V*6D-B~4E&~ly3q#`vCR{vR|M(3Lm%gg0 zv5w{MG(~g63 z5_T=e8X)j!LaWENG8}ogeZ)SIfF6e*#yd!5z=PiFCpxbrgUyA~)UzKW!PWYp`igN8 zJTl0+CC=gn(`C0>0?5kn6`ywF35g`sXAr;W%v291-6^0_}aR6#|Ky@c-RVl))XptM}cgDb2q`CoP1;QjO*=bC>Qc#^B! zE}agB0U7oU?^K#0hdcXu7-t>)iZa(hHgkC2`Q#U> z#xbA^JksB|cNrzUo<=@8Jo|IGzwE|*e~iEeiA_J<_!q`&)j4R=^-vYwy|jNcL#+c; zPDytO&qv_v2cJyGb<5EplOu4Utr#s*NmYv6BI@YsA;MBQqnU z8_mo^NpnKGQTGV-X*S_jw0!G*%s(R=EoM{XbJgpR_mqPEvQ0GXzuI8o;?RWWB26y^ z3s0lo#T>E^%$>k-jX^cra00S}uFsDLBC_9T4SslX0{5$?kc9B}!wbVYO0CKkpfkQ! 
zx;4BDE3V1dPLj323q`AFnbqN+e)X5#mhX>@xap@6&-*BNxYRWVp+y?~k?u`X$Ue7g z=jRM3cwpSv>L&6DZ05JwidcuDqWQU~n=4~5VB(%~f$AIDFJ3Xp)0_GEI=sJsij@Bj zR{vXr759Gt^BwQuQik}@1=Y~fXIw$%^kYq-AGh2T#Z+I@5~y`9rnG;m}a?On7T|Y(^>73oD0r(J(pY!nvn`;h#d~Avn)g&fLZV&mUh( zCUq)@vbdN>?pKpRPVKVFa!x)JIJ6XB%`e08{?)9Wfp9*rz`Mtn_Qg8 zayk@$Qxo7$jE5}ma^1M#OicA-@_gA=2Z8)E7bV;MQTUwdX}i+}cw#3jnfi1g1SSur z(dO2HvF&E9^)zBn-&UcL$Xx|Z6}uc=l>;EoQL&}YAQ$cXYfR^9cwzSKs%q}Xa;Uld zkwSMZ7n$YWgv!(v0hjD`<$_%~$SFFv<$G)s78YE1IqKvCr@H5O$^@ctQ22JCeYZY7 zB2{z`oJs{zCZ(#J{bg9eMVZ z1AIzf>~g9=v6k73*GCfZHFr!SSA8CiiCFoCaVFrM7v0h+7b?MO(^NpEybp$SHb0*b ztN^~Do=1I6De(BJP@WpGx!kiT|EBj;7M_r{A1SEI$ABcWVZMF&5TKy`vBj_kGum2* zS>G3d$)RIMRV*`bo5pRHGatgCHuA_o#+ooLZ~abs+O-;L{p%WZCklYIzNmbYdlDX$ zyW3@_9sxh>3&xeEDnYDi*;!gG7nWxBx{;A4!ILUSlUt1Wz{@UoU(U!Jx_ts3*^+0V zIi=Ri;VA;L?Hcb?&rZgtGt$Zq4dq~xKNFmBvKIOpEY^>FEy0UDRU@Rn4`KWE^+H|y zOd!A4EX({g55zORrhGPNMS9sD`#YS`D0SB`pmcjM@JcA2a3SP^&^MFNT!vh1;@w+K zl~@K6-L6H?Srg!sz!N*E6#~T5Q@JxiA$X34u#d}SV1V?bQlCl&YUyrz!#1CUWd6aW z^WPB<*wdFXoGXRkcIl4EebumA$mJZ5K@E^UdN(({FAhv7m7*B$SK%W2RmHRRdC(La z=yjbs3ZMJVX!Culh5J(%4l5ll0C75_eZhwsQO{9>vcW$H=_aSLOOMBb4gb4FM)w+c zbFfu`Mxh1oYFwxB_i!ST0nsZ5Fbgd^s2&|mdBkx1?h7O-{@chu|JIh!D_J>m2q8qcJbAxe! zuQ!ogkkY9N%_1N*W74!!b`GuzH~IXKFGa?}@_Faeg`jwi_f(@#5*FFsuo`|<3JZg? 
z{6>7mu;bA?+PzuHaE;UW(+3Y9tbMSty@RU=BFvODEnD;9(xgvu%EvTZCKwFH4z{8H z)t9p-FJsYIelV8WD*^Ilukd7i4TZdtZ@TNG-k==t-unDh4L(1%a%8!x1#N`Noli)I z;y|c1)5%j|@a~}cUda=PF`x3r>Axj`eUrhB`9>d1WuY)xO}557msf*(HxN*irGoYP zAc9u3Y-antWZXPY@g<8X2+m4AU<)B+<3iuH$P;R@$SKp6yO^JjsScjsC7cQHE%D-{ z((4*rThp0%dpZX)qSmVQq$MHgyYc{?E=)F8Q&ytw z+?@LbxpYKWQJi@+3lkmuIvizU zadGi$( zJ+I*Owmlo%C<@^At?8y1T5B9wGdgL!$reN%N#7lfEI`R@PXYB*C1f_!ka1P5gY-jM z`ywYoP;}vgcSv3YlzJ@9jSiGTP=m`zHU|M3KAajpv^NjmQc~aZVk*XcpQSeyisoT- z%iOTz8-KV{{mD4or~!^>=`hclHN(zzvF3A2$*8L^v$iti2V1`~6~3e>gftQrk_@vl z7%o|7v7$|bcdChvw+3S1%}|`|CgoHJkGDQd$SJ@NQI<@%PnSdC{n7LJyR+fdLS(iN zNhS{7IG&TM(uQuUIa|jBe1Jpt&?)l^`7koknIP{_f(0g;D`#jjaQ89JwQWZn@os|a z;p_wX_$7=SXzeQDb-;%-#n>V|wx^Ejp@%N|XCD#Njnsx?t1_om!z{5ec;8I|)}Wi3 z>|MI(Fz_lSTvB{TK*eN*G^IPC@F*@o{flufUYCrz+dY?wVMD?;R9Boq`RMB9UaCxN zJ!_XTNmhQX$Adg9?#BX>OFTIcuuNhJ)fFBFFpx$_*w-r7S?O2K5f zlJR;`955*9hMl<73Y#=^TFu?V@KL`e`@{x6SkDc-b9*Kjn-88=`aWOm;rR?Bv^* z6XSs-H`c4=0<&Qc8!xgK{MI=*d?AOBbTq9%2TKe5w|-ES|8R(Qr?5VIFP_ zs~21*tAe1(GZhnqjxg3JU`7{PgT}sBbYz%{aBS-0RSKfte%oE9v;T7pb|~!E6}q2+ zz834p*2rFf#hDOvpvXbWZ5zLtJ*~lUz7I*H3$MXr;}MS-xhQlFeEO1twGyW|WogPq z5D&ej^nLI-3TbwU^7Q)_VS~K$CsMu`AV0FZgIy{Qg(SJE-rOq#c5lY?Bqu*?=;B_K-o!t zkBukOFt>C@zTv7nQYq2&&y`D7i;t50bYQk4Bd^!D884J%(MiAewzJmuznWh#oxk!aqG4 zGE&%R<-r}-Vth z>W}_#a8l2`20w4n9dmV-1)=Q&*&X~@5XiUSvMS>a)M7yc23>J*wxu~OW;Gp89^TUL zpHq#BV?*a#nVm5vXg1e3D+^<~H$EYpslgw##&>I`i_wp3;Y;|lGT?Zjol>D)iLwE= zV-8&LfN+A$rV#fE|+w^5qP;caKrrxj6%U)Y=UF-oWG5`=k90XPMt@kgM$6`H0uGjLn*U-OTe)Nr6 zI69Lp?rHTd!VMh(y=Tn}&?JA=N%iJq2-tN&a>LD1oKy5UEFBVy1G!gqIbLSLa;1wc z$DFTDnxD$&cB2ltlXxFRL1Wfq+p0O`J7t`^+c+c5d^r2P1 z(BPE~am_^&tt?n@=o4JQw4EY(BWmTFlqnig1jW4hQQT8qa{o_nT!$qQ7C zK8tmUCnNh4#*%_P-?nf-S(pj z^p6egp$_wfL#5AeODrK|=F;QXqjY$&uTy~2F%Ls7Y!GHX7y%z#jC{gFv#>UcGpP}p z;G^&Ap|m@-kf^!bM(>Z8fqsK*^oF$zpE=c8(<- zjL-Lkpf<-0!scRFIA<_>OE?E-Iv*72KsM69zsVrQR0;_KhEk&8>8NwnBO%p06OMj8 zq<@P$3htMf-O3^qL9NV!kLHmAEWDj0AdymtjI=>gVecD1nI9YRa9Op-^Gg(Pw7W 
zaD_G!i;4A($;{mmJW-`Ct?sE>00`uf_|N122N=!$7cO)BIRm2vn=AdMKAPV-ze8#|IZ7cR&k=YEc?~ z-m`pXDj^(0Y2Hg8y`7D5-$JNu6Wt-qL55U`B_AI9(|(ZM*@A3K3iHDbSzs@v9^fQX zgjBm9@TEGW!HZUL`fut4+~~=nSf`Z-t@i376+*cvT@bSS#(F8r&JJ)Ylr;gArAZ*5Ag?t$8Y04A(gXVw#3f^L;cYYOfSozi&-qcKWGWtf2{2A zXFD1KvgYIY5oC(g;X zVVD!i_x}1E=%em-$CPI1QtjBmpGyFrc^h@Z#$3Fk#5ZebmI&(1(zbBgq_9rjd_y{H7_m_~qUs3D>U-`U z7-)frJ$}zlmbc->$J=5UIg((mm}G~Has-T=Z-{fC9>MDA_3NTD9UyL%N!m>4Mphd_ zsvl<&PMjUnuE}eK6;+jsNuwiRaqCI>ogW=PS*Jg-#vhI{exsk57uWd?GtQw8$3bc# zo(af&?dG4=v4|Pw_bJV@CV`?)%(m<8yPv4D{hry4j16^_wRpAu1out@Ili8xYs{BOgZ`8yNf}m0<1s&85(= zX5iGfPNU>;M<*|lfZLawkY2CmE#YGXj??T{7@+HdC`Gb056f=QIZS_JSw0Ch1@rSJ z-nE10fNX5f>tW#KUEP>dI1J(sjtXdxwL+4Z@IduIBl^A8DW`UP4U@7H-!{H0M{yFC z!-M-fU`2&*A?s`g-hD6@)9|zjXOI6dw~s4^fM{Khl|!Y_R!5x@o!bo@nw$Kx(z|dG z)9$Lbx53Vo5Z+SVQXJ-d`k>#w76s24zo6%?Lct9_fmzNycKdNI9Hat{q@6aNSSI26=AN!CB=+=w|o2G?Zrk` z=A}&JQ;TiiOKi$dBrI>49zodWx`I-|2rQ!eV**^_5yb>QrQ0EZwcZ}=t#pYlagq5R33i$F;l zObH}Vf=3K^tI&L}S1X0CkEaFAwCdofhF8Lm>xCH2QLjF#m4Ww#rjz$S?)+(sqrYbh zRZ~*~J}qT^RU;zFZ1nKKA5+Z~5*vR~%?pWiH0?eHeVAAz=nzX&f#m9vEDgrB(0g*n z!Gq3K=xKM&>0{>$Sf33Q=RMXB1i6z*U%$n{(c@KHC7io}>)n9=vV|GcafU}y{+NQ& zZYK5m{RG&vI(zWhXfHhLW2d)EaWFRxm^#Aw#^4iXABW-aYK>2_$N-*^lgp~#CeT1M?6r2llL%Tf6iSnj@gTKB9gY>RX5sxKo_UmRMW$_2B3qW6oA zCe^J`MqNl8{t*mNfq-XUDg64k);6J$fnwb{6%B96}I3jeDC+u!6oUS1)+(uIj z@4HjQD7_~^F{4#iw4n#=n!a?C?<#~1t zCe#9qE9LFf*3BSB(_89(Arq6g29LJN*W!Zyh2fi$U8rrBYbSiG0Cro-3s=*%rtS;KYwpgz7I_|TC;Sql_CvNa_VP};CGwe z^_TMm9M9KgcHY~8Q;+4zpgtBDmVzpZy!$ZZn?*k5>2%z!61-cKllXVy!iIWsWtgL2 z+jFfBu0N#-{QpP6=V$lM+dhVAE!{U5eq zyc}O+?`$3fn$25zJnsJKHNR-O{+-wS7dU@Vko=YcKONdrG&8V>`fqK|O6g$M4}EZ|%P&_ZjVGMmQ(UTp3ya+edoR_(kJpE;IDYW~lXn^POlA4Vf3zZo zX;&G}eJ>#N&IaS~{Hv~dv0ltFRpHf)D#dGl^bJ>}n{h&3Yu6dq8i*b6QxaWl!;!;* zsu!sTAzhoWZU2Ex*v4<`C}r$}v-z`O!5?#QJ$AFy^EP5N{^hprncF7lDLfV>we z8858dzTFB%qOSQ_t7ULoZ}r-i{3=x8|9G%lydD*wk+ewI5HKKv>w{op3touHYuY@W z1$-YjiH#_?K!RjyYEoVGPaiw{@2v6QPom)_i7h{sis#8mNP2jRf3Ud`@Q`XNl{;w_ zDxHni`aUxX+YP-2-R|}QM>1>NQ(}`#wRLm@zy4eN!oOx5UsVYwzT|yAbhR0*9*5uF 
z7|{Yu*+TYo`)iSs)HAW<;23HaNnD(fXoJxPQeW1>a8S%+X5k<AdBCG%BDt)VM11~$z~V;Y(C$iw zMs`XoUJ^R3IwFyW^%~9!jw9s|_p;V0biN-NTsHNNTjheU)2!VAn^EBOVMw^Sw-5-k zibwaBy#?oU=36dbY=siKt>SE7ZGh*RZV7W&GD?5+BRM@%1>N!tV@BH|aVc1JIH$oD z#J8whI>fbsjoMZU`^XxkF;T^23xug(SFi+~Dz^nhwaMp)_`( ziUZov;MBtf5BsvoB?%xsWdbA5f4xd|iE8GKjj;w!~V5op6G}XISN6R3ZSw_;vsSW79 zM>_Nz@qpol=dT>(N`c3B2d#!yEDp8lOc{4JBZYFn?%O=c_%iB|)JM+-Wa3M?ee+@+ zDy*E}J7(C1iqDVp?V>J)9~pWOyC)A6Noa@-T@ldgq)EFh=0neVS1cV>5nLL41Z3Z< z@EX&;GoJ`;$ozd$RPT5x-iRzVuYMhhgaPxjWto|H$@Off($O~5RyuqpAifpsjt*M7 zzpMlT$wO~=*NzcRk|gd&!yu-kgkR0OAMWWEBsOeqgX2+oB{TQBL2>gGP4iL)=<>To zO5YEK;m?#D3>3;VqGgEW_?H<9sn}B3xOy*wkF^C9#4PZoC;<}M zUd0~~u0X}tr^3!W@PqTR^o=nWN3lfyp&Lfz0?VLLQWa|hdbcYEU6?3D7P1JSa47tF z9FP6!as00=^M?}nO}~xbNr)P*{%1`0Bi$>2z%6EczcvNR&E&Z@5wgbc)3hyT7fec)XMS2kn}*0S3Ec_KlQ}4B4>c%G5bMxkG>9}BJ|LNYAP)O|Gb!^Dj0SKcV1r$5mh-0-aywF0x z;5UoYvouXeW5rhAI@Jr02HPlhT9v?lI{&j&42YR)Z1Fyu%3vjAA!f^ld^mHnB_exA zHfqwl#d z?!xn&I(FmWrfp~$vta?N!|z?Jav6d3k9A>RcYZ>~9}_R-dU~LXVIy z`icvgw>4=xhkm}w?mJS-!xy zMj0mdJ^_|*WY-J*sKj=syV6D*yfCZ`7)IqvV3f^hpL}^E-ikUolVg&Elb;wF?gV+` zxx>fEGyB4D^NEuFIhh=cW}>*4Pv?NAbtvi{-*f@D^27QYb`zQ4(Zb6^6M3*}g=;|K zy*KpS5prc(jKeJ4Cx!W$m?`+0igw3q*qL`J`bN_TO@nJ0?T)M)`JOPRDQ?JO)?O9z3$fyYFVL{!ozKTgPy3p=6uk%A_dA$@x1I+ z2?YvD>e=Vcp6DjE|FH~TB?{-u4Jb~B<5H$z+K>n&->40TYvcf6)l{5g@BN?EPiRtDcQ zk9zAE!-I8p0s%TT((Cezyq{epQQ zaKodu<8BegZYt1?(l3T@Idx}AYJ;IFE%^!R9ohvj1 z(m*$P_tZAWHW(z_4Dr_~gUNR_32#1D!&0i+0l6I+C}quggvzlRZY2-j_$HbRdRoU{ zMIWt!Ox=;|SMAG?fV8H6=$I};!L78woR64s1w8`Gbwei%W z=P?6G17$?I%D{_r$%XJqyCqxTRweLkAZ%b$ECurvkxBb|4*02$k$tVG3dbFJczcfO8>-2s&uhlq-`$mVX%u}EJ?YKcwU1GUiH;U)08 zKM&T1RL-oQ34?}EQ-NphQ7HOK^}fLeqH=_Di|hhz3}%~llsTp4LZ{jdd{AA6U%Qy; z1DK;hI%(Up&mBcr&cq_iGFORu=kx-T8VK0FI39eZ*Bn1LKE3xag@8?INk_I0=E2#B zTfBlFV?pH+$xZj8h47_MOU#kJ1~#WF2u#HZg8esHu>Doj3_Qb zHir;9j%)eoQJ^Z3O08sAx+hO1Ff=m6uD>U^|#KKVh!IR?>Zg7-3c7|c6AW~vFu zK)QamD^V6gsF(1?qI)bJG=8|*j&CVM!=%M8MR)RG?s&RhGm#rVdQ*^P>1{41PgTYz z6P0)My&ju`18dQBD15uyUM6R5aj6|gIysIKqjfu8zPJjPKfaeZ$NrPLQL1iI~ 
zo$t(382<27bEdTfqz+oEDHpqe^iGeLMKYPdvG_UM%Q_dGvW2%%D-z@3ZNDE1vDxt9 zC~MUbw?;fw_BrfoaTp$r8@Ieao&sjCH!Ppr8~~~ZA_Xq7CLsq|TrHZGo0WV|(NEa@5$TW;Q|Pj(g--Vd_RQl9}zHd{dAC6eG-5U2Vj;zh?OH zeRvywskp8FVlo%h++N7Lcjdwv%cWf(RmxzkPSZ`y_aP3R?q;6bo(cBe`U<B*bJo5c~2`6m5nQH=!$4z(}s7h~|QIpO3+ zB}5W&61&c}EVKw3mKJbn!Wr-GF9)BOV=w)9qZn%-HnZHm$DEXo%hTkpw_ZiVbou68 zvf&<}T3x>Rx@I|c+>7$xv!xzr##~Z8H{^g<Tc7DDnm+qOWaFuYg%V|}Hv1~f<)50Z&IgG!HHa!bn)@H=8{&zckl zEc-a`+WMtomvhNnnO7$2^YgZQ+UEk-_L0L<-yD%bu(M@VG#>iU*AGXP2?=zW! z0kL?Xio4Q&Bmi-LnXNjnVJ2jUO=yR7Rb$h|DFZ)+T(GIO2I{32yk*hqay&i;H1=R> zM2-$RIo@@A&hrKjtjv>E=?{QykF1CXoi39?JaOIrXSZU>@IVmH`2G4kpaiHUMQ&q@3Jh30zXxV>Bs9!0Zcq zXr+CMvEjz-?)t!X*km@5QbAS*cDLW3VfM?xc&>o^;thpZyuS8oVMh|!>?S!%_vH=R z+?x3uQcw#N`=_Y2`3sOxN~JYCnFA%CXLC#p!|>2&x47jOWstLze<>QGA#GkQ;FD=J z&Wp8bC(4#09@40H^Dz%v zVx6&tT~a!x9n(9LgkDy9qOiGl3~Q(#pzXIET~FO1GsEw0ZBYW+oR0E+M4t^ul4WN5 zi)-+uGW)7YQV}Z5&+&g#s{rHL^9Pp}gOFMHxZIVIQZ!V~j3{Bt$3seT4=xkg=*sik z=G@LycMD7F3vOBpbx3dvQ zzQ&A@w`ZcdIt`cqt#tetzb5KhUjYhK;_~%H)(B5(uC#ZB;NF|5=P#`~qgZCq-g}A> z@F{7;s4b%m^*rC#>q-#s_~!59mOqgVV#-<8ynR>E_oN8z&Dadsz-cXIyT1VLYz+?x zpv{191Q9+4!vfsIu2;lvNI<&nqX&o0N}=z3Zz@Y3u`d|*7%#t;3!#^6l)oDVL)rzB zGdqlfF;a9t3;+H!pc37vP&il#u?M90f6pw%rH?}?pRS}qC<`k^JD)oy(N?jO+Q%c6 zELqp3-IYim^!Z?^GVvC-M31PMSQ#wX*RBj%7DASflH8X^fxykxCY5<24zA`+&S~;&BU5q^`4hG)Sz2+w%i{ z=v9I3*k$S$C$jOzXs3v=NF^+%Y;5vAYYSI;66hS{5if%GlzT}&(DSZ!GVM&qP3?C( zPpyQ5TgWN($F6z!k#nplS2PFN-!s~E+vdZw>vG3;ooquUlRhPr?ZFsyaK~Y;#4;?u zwCTGEdmNmcf2XT!Q;vrfF3E2>5CU%=DTjz&bq3*7&T)szVyH4zrumRn0kX4?g`^(T zLdkL9QMD=un~a;5Tf`$^^TC%q)+*V+Wz2a>P_!5YGY))7c8&#TlsJR$h#0AWYD%@EGy{ zjV1lBObQ6zlyi#yhUp-B@kJa{QW?@6RrXY8uE2{2Md;pZ6+_OQ)?@82Qqes3Izd!0 z2lJm#-Qy!khsg(T2t4{3(6()0`_PX#RI7RSDIh!n&uNrg(|DJGYHVGXrymv}PlN62 zMWS+xZ)%Eht*iz0uIu^DIa$NbkNY$%`^@nYmDRp*I--J^-0Q;m4}SQ8hU#8%Rz0M? 
ze6KUt|;h#Ljzac_6#phzv)Ml}|(gqe!yoKFVcz0Iw9rX2%GL}$%1XgAl zyL0#3fLyR)Zr8&arLP8V7lZ}bzTt-HUGw$&071dBdWl=B97HEf7R#V?Jr!01d}nDc(M)Lgo1m z-6{QU6snIi5-qBMtu6MFgd5F2!T1wv{98py!=~ujNVaO!cXQm4#zj_IJMT)(akmBX z1t)@!NKfH3i-oBi@#4j=X8#>U2`|@wswknXTgcEJs)Vr^jLR{az){vGSu{r)p?1@7 ziqnTQ4A(jp+Ah`%8&3NZUcW8_HO7d7Hk&-;<|T~S6QRoX3LYd_A&{|OnmL=5iJ!~} z=bf~A;HbG0&GU+I)Xq$w|KwJRa?Ii%V~JNcs&+m)v6;v=UkNs*d^J~(eqIrg=EO+R z?nrjLz^wvpoAP?EE7Tx)B;9D0Y(5yD9B}aLZNks5vvSPQ{T9Jo53^qoJm?r z16KO6(G*Gyz>m^nBrk5KA(N&@PJm%P-kDTWSHE0~RiaZnkIhu!u@OF+mcV}antk$l z>Z4L*G1jWre_W4AvdiC1S!2;Y(Bm}E^-Or5VmCLPI{|lUhnACx%9JfnF9#K!9s@hx zY~k|Hh|~wGU(03nf!2HRz3v-Kf13Pv6ea((#9vpG7->&R3;DEw^Q+C5IY%jNo8@o?hq!&}QS?eJ~?ZU);% zKaiN66ML~*2_)(6hOYxE!18NJ>S5+$ES98du{InA3;Ao2^qq}pudcN6N@)ZidnGz; z$}7ZDMwakvgC1zguV#8FH5mjp?|73a)Qwd|s^b~9Be42P^csCaCqA4DekKr@jiF~5 z$=83Bf@htva<_gMrfFAhq|(g*UwY@vZ#m6BPsQ&jN_e^cR}>{FU)0Dogfn2>v~y49 zd>tC8Kj65v*b772H>}H~^N{kAI=Bf}!YSKC_E+}^5W2p=!abUT8~P!!`O{ly?i+hD z6c&b|b#XIl+6X?)j>20cW}uQa<9gkW3D|;p0xx#Vz_#uk(6~Uk4rbhZN|F}u~QwW@Le;*?Q1#6 z3~=A{w10yMWA~FZV$%UnRMm`n455#uhgrq?Ahb7l*w52y}A{JMM4^71s%b- z$I>gqJ`H>=p6ed3>48Z*ZkdiK;zpCIoLUUpahPd;_ST6aY|WH8+mue^?v!Kr1)0l% zz^XBIVJ;W5w%yLp4sV3SoJl>U%oK1N_1yT3w;$h&QFmT;>jAZp{T40vhw--bH7`f0 zA!t$LuI1*iLz`u-k>CM$G&i5#+oH?@B5q@E8Od6aG*aW)9CU+Ii=udRL>!P)y{?|y z_z%&0JW#YVZUIZ9 z^RAA>t4+mA>4j{>Dc{@O@&Uc5ree1q$7_*N~JX8tUSX!+%`Kg9s1*ZkS zw@a@q{B+mfQI!0)@c)FOs95`T#VhQ~zxv*XZPWtwkIcp=Zr z|9x*H+AlE0J9V~UnrPd@7$ks(U@&W3eJQ+IxJTBo(uq4>o|rY6uR+7r;i;te{lt5N zF@6_$2{0c2RY2IO9%jy-v6k%agGXl;-}#KTp;Ymb!LgidT$-xxaVA zmZYBhqc59qlTK<9EmJVQWL!CBP3&4eJ9o*9wRRrtb>GEAI&_P3OGB#V{AEQ+k=ZTl(8)Gf`99*e z`*=OrDcT?5p~-;zF^60|i$k!Xf%J)Peib}loVxmA*a8dRaYSt^&%hXI+jv&idgNDK zxzi+E48xDOV)CQn;2v4p%KTIfq_2OTBq5S2V&rBP#hDFoqQTFI;qqH}B^Oej+ZY9} z1Nmvsi5K9z%j#7s$;3&&!Ie}q=}zp~ChV#<9gB~QWhc)!7UL4Hv$!&KGM3ny>)0ws zfxxb## zZYiXlZ#`I(KyUg(7$~@%zV87y~J^>c=FIj|{-HF%D4R)~)=Yp3J zyWA~e-Qsb6yWp4E9y~;PxizYz1D#si`ZqUL<7Qf2bJsE-OlUzX8^vJUVyd^Zo+A$2 
ztFB)kJWzwy%qkrVmxuB4`|Io+dn)l>yr2}LWCmQyDJyw%vm9B|MWPLe-%(~Yqc~>S znuEus!;7_Fwu47TORi&5JFsZ^Ssi8V1{0og#kNiPaEvFO?{f$mIDp7mqbRh9K(xeGIf7F1QWtQ|h*$rrWT3SVucx{r+OS1O~v375gl+@b$ zv>loDhwJMShX%iPJ-?$U`IqDP?^E!9QKA)5(qPdphihAdB$x|TU?)R5Q7jr=)?d#g}ONF3(-PB)ZC>vK(^E&Aaf|1ven|AtH8+HqQ zXQ>zK!4T3e?p31v!041G^)bI%IAtIn)PAEHqrzQ;NUPeRX7ed84&zeftT{widMFyH zu6&e_mX0BQ;le22-1ku^$Mu>-Vx|6+JGHrGg+ZL86fCcM_sW^XOu`_rR- zM^W;79`y$q{x?O*>oe)3x7j{J$mqt%o}SN;x71C^M64#ykI8D!<@MvyHLJazx0j&n zOpvwM(*=x={2QMwS_WgX7-%*tC{fCN@;HELK&rCGBB%nV+k2>-iIZo=Y{Qk$y1wwRKFyAGZ#D}3nE5oPSB=FY zJ{gm2b=b|Q=BM{G1A~JPY&4EegFEvA-w(YChqLP=D}!T1W|K?aXzn@zYRpO1R@M48-peE@&J4)(CfuL63mR1v8CY_|yJU zXzhbK3{Ag(G?tKqW4^u%6ZSD!T5v|`d^tkKC9OZKJ%MnCKKU&hSsXqVDq2&2UW)r3 z2g%=W55c2~Dteb}Nsy4k_=%q)9~8pfU$I^^Md^{@((reL6Cx%3Yd%*!gz8u9h`l0? zmz5<`3I)@_ez$&k*e-&2k(2rC+!cg9q%%JRWX&AT@*zZ@L7EQ-|cHlo;L!(%NbahTpFe4RJj1`0+mP3@_&gFL3pHeHia=-yv9 zq3)IeTYBowBt^dhyLavazbtw{h-cE;S1%0DEFOA#<5oQcYc-U-c-e${CKl;q&$IDk zKc)LV?-aGL0 z$H{wJc3`Qj3X7!7Cy^*vUKHn-orS+{P_{g~or8{1DXh9)QQ&eacg5DC08=g6*-xyuA-gY! 
zasX{Ms`TY|FyGgQb>S!3k|7NckrQ-Ikh21 zP6cEu8*Ma7L?W|6LZH>-B;<8u8Xs2;Ckl<<^d^#ufz`R>g5+cn_%?YSD5G}64EsC2 zM?w*7$V}aNTZm3=W_dE+%2y={P46PFWs5b76ZToFH+h74EMFNu#YyeJ`p}Ln>NPRAN2v61ApT#R*REy?~+3|7b2WK-6Y;M#Vv zT3=O9aG93UTMi(w;DxmSf0=5K+H!fL;X^)3r759BSPh0T#@?m;7L42V1}HsumxB>m zT5FVY0j%j{H5qs(AfNCm**nu*ysg~cbCSOjXrpCwTWynINBLd%nUxB{ePKs?eyIxL zKNPs-v}Qom_O%aM{k9OoJJfHYlz>0a1k$hZx8g#u!0QxuZ#;Q|x#8aXSh%J}P*PNF zaQSl7a@y`jShB02uzys55&F#e0%Pg0{POXUn?{w`r+i2_?F>OmG59;QWIe#__M&DA zfn=n~YHdpldj|}$viv_Aa?owQ{8UzZ5x!hL;xWZn3HlucfgCCo*xh$={h3`BxNl#G zevb)Yf68CO;cN}s2rWF}?T$n03mLbzpQ(nxJi9!*hBDOsKFG>SL^S<&(wKCP6+&z8 z$-w=FO^_Qu=y%T`v)z!Mg9_276v0}VDSqF`!YID0eNYL?oL@?Xb6&kjMGH_i; zgd5i5r@X`yup=Wri?Nt+GVJmntCb_6m_)r#NPjtW@072&nnDy^Zy;O#*%CO%@Ve^e zi2_`jvzL6?OZ7};;UQWMIAoho7M(FxPz+H|_Yigm_=-j${ zid(T3ZnafRMfVX)x4q4&d8Rd}vb7@kkzhNL7D8Qy?+~5P@%*jli^{Ro#$<*}zKqC* z8>y&vN5S049ovl{P3T~ltkfj<$Ly|_r2|B|q(y6%`kp{5n(c{brP}X`-h9%Zp zaLD(g=QlsBHzM7MWHZDA+Kk6HG*aLty`fXLMm`uMzTwN^%|b01`VNuH8Mq<>&mGiF z@#y?c>76dAc&qEOxpH4TuysTj>^YSI*J&M6uP_sF2zLXyitTNvnAB&t&Xo#|Mu$ZC zu^P{~YA(}9kO-3E%|}ZpLayN#FO}Q#Ab@r{f5kl;(rE@0_6TL-5n7W-lUs2p?Be9D zaJdF|3xp_C+oS@moVA4}Lo~!DtIOw12fN_Wv@jShV4>6p-@1OwP{=mUl!`%jq>m1Pg>3BL}NMJNgfa0 zj~`d>r3%L2nG&uI!2+C1b+?$F55)5tw~7^<5}?0WJtbzj36oL+E@)4ZP`beB#+{J{ zxOV!XF8!S>%%412vq?CqF2uc-4(^M@9yjsQTSQgWafLH}`}-X1uD1VWu%i&3))z2s z^UJ{L6C_RR(rhd}bRv6KMgg+)4c%V1_lNjq8jo4)F1j4EH;ac8>0+`?( zGD=B`gH9=9E6B}TEKb1VK07mB6Hl1& zjdRmXff>-e6gDzC+<*?#p(W!x$`PsFxs{F)TN}CkG16C}fM#D`3!iKSq-p%>lx?X8 z&b_yPnqBe$hrZU!p|=ZxF2jn`Ix_+L8C{2JrIRr3!(9yz5f5}$9Z~ln&g;1-O0~BO zNl0&4bT{&BG{{*#2pbM>gJ~hN7Iyh+yc)yA%wIxp~D@a!oNhlG4~=Flm32B!m6`{BAgdq4l!-a*wLXzm!9e%1Jy}XC4VrKKlwu+e zz-i6<2^}x0AnM@7KK>i|sP!(UTvW6IMV9XsD{c}5QWay)gNNBrCnb>~K{z6=eIR=l ze<~I9tuB6*$@E1Ehiblabh(&QdQzk306}^&770;~MBXeFStn%GihMJ?UjIQwocoQ$3DzVEQuP=$*tR7Cs z8$DSC_KTrlK0#u@!baQ^&_-hG$OeX_67G8?85m?9U+Q5Qi&KgVueXJN0JaC#mC`$I zzzK%6HDnp^FR%*AG-MJ zhh;9vgjbf=WCmgVr>%@1S6boG_6U31hE}-PmeC$pntt!T^6Bu(rVj%vu&ApVeiB3G{SZ08*X}?%%}uDm8bZ&93Tii 
zpPzi!c#`pIRfMY3VhLJw?g)Q$BLmYkBis8fK8CaP=P%HUCId}a?Vcl*gzG75!{7EK z5zFFhBDXk^0GUcbJyxBWK$h`?&gwZ);^{SbuPU01ri$88{9B69C@4wS;bt`E%2gib zuhD~&(?K?F9tr4Z_-z)fsxfc0B%q%t${iVez;S|!gmK3f?=;vGL`~Pw0d;>(d~tR* z=2>_)sBCX?duTzBsp$rb{6B-hZco{b-CtwCPP13La`_R~a^ByaHEV_8)Rb*M2=#-e zJmb)<$E6_lW!uqrjggQedV)upI3E7l-p@8h(U8)}K3><`4*!fO`S&`+zbJ%^(rABE z%h%qw7LdBC=qVG`>+_rx$i9RrJ^)8;B>vv_^x2le9PcHF3KXEBetAe14uuBnj-a4qnl*ufSpcn~MK)J;rYFO8IIr z4E&iiW%5z6NX61zxVLE@iv*8_E5>=Du>+qE$9-$8D|u-A0MR+AlLbT{^4g5KIeiv=BU}ia=W&`Z1v@yA+Qu!~h z#=j&=OcK<#suU3E&!BqpbN9y~e(xKjSu!?z)>nv~oy}INa$7F`Gos}5f14;d zAjcP<>-7;woo7P582v$&%-1~Gt`B#(brzLMR)b)}elw15;W*$Ub(YS(9c<-_Hw+(? zV9XDNG}Xae^3Y6y5 zhs}s6#Wlq%OSh>y;j@65diRM2OqsK(do0k0rHZ+a)85SB;oK9Vo#)G;(x$EH$CGT( zmwsP-nGSLNO4s&bLh^WXubX7?<9=)^3)%JX(G-mNX_IdyvX4{;cHI8Z9Scs!q(ms} z+n`PGvtZahqCj)h@M7P^N_ZZezU8vs1O#;Y4PG!C0H?7mFC}*`yvxs|7m0>{UGl#n zO8)&y{MU(+?mw%OC8P?#gqO7sBI`jaTT4bUrVHO+k=wSURR%AFi?-Zby#wJYYu?h= z3V>tm=!4AWDyV)>-BLjazf8Sf*@&f$LF_T!sKXZCxNqOb?@Bez$g;z|Tw9|PbVD0O z3cVYFopVd&{?lb>-9Expmh>6J2T2F}_-mp6q`^k*xm29)Q2b>1HV13_o__LWE635% z3reQtZBYF2*QbTrAha7C^D0>G$F`u(_PnunY~a$FiP+l;pSf!#_FwJ6!RHZd&wp0p zmF;x=_a3w(b^H4d>u%8))Tp`Ntf2}?->cI-)yA-&v*X+g;{Ve+W%8c9)eBFpSVK?M z_28i^K9*9UE%55B+{(FYW$@rQ?PqF)rS_tCQMVhH@cJR1&n+Fe6q z(Phxn-#g|!2r_DFuA$1V_++i^W%$P&@Jkd-T~qu7{q{dY7OR3_tA?yhVn`<>lRwz` z?Qto3kJVlamaN3#h|h=CzLrBP!{wU3M^#vJ`f&X2(kAR7bMDn=Zv>YQN?Sh@n&ubx zs(I_Zh<=h-@_PtL#o$X7+gaDT@C5twV4;W>&^@lNa>oB3TJOIhO8#;7{`*8pv|0U6 zE#)TQ%IPWCvM~UC&o(zxXBzP9V(mFQ(^}Ad9_Jb+Sp_-NU7k^DZQ#%)1Lxg4kX12U zCSAA+pDjC0aPW^~L!yC4JgE$gXc)YjsOqrv%D3T12}B?KkM{V(o83SQ?_D;J6R<^( zei>b92kdzgsP)k#3WZ3`5>Kl}kgy(e9PF%w<6#u?KkE81Y3-;bFI6hIq{wPcn~y;U zXPX(-d?l=2F+QHa+ze3~(_E5TycGkZWC3V*q2WaaF z33$yPa+my8VCI?C&(Ik_jp!>TqK35~0V_Mqwol?4!T8oVzFz3!%@!Q2uR`t^Udi-0 zJB(d^I1*dmhZ`=J0*ePGVJX6Gtk|m)zZ_vYoX+cnZ+#;ezfcfTK6{g+S!YJEQ;ET^ zXW}ys8h;Z2r5@0d{ru$0zV_h?0N%;r|~*$$$8*e{od*uS5y4 zp8FpVC930P9&n}?A9h)H&Po#qVd9ph%Zye)bL(0C=EE^i`@zH<{;nOAxb1!qNk+jT 
zM?Je_qS{2Ouo}b|F#%Waei+d?TnUs@_MgKhOq6mv(?BL{O?RfDC4fiM3?S9dULgX<{ zb&MHErWQ8&vZWRU>^A7m@>U>I`$0KghFUB;l6lQzTPLcqd~q=1pTM@U6CAnsDzPH% z;z%reKS01|mB-8(Fk8~{ecP{kkfB?ZmzgMl?}x(Ly@M*jthj>l5MjWNNZC3mUs;d; zdztk&M9F`?8vkXYB>3oO>J04$-1E3SHThXL@`p_(URN6@`p-!xQlAo=Po^MUJ>dp8 z_NbqU|I0Y)im)8GeZ3P+&A)OMu!q2>%c+NlTM0c+T24z5^=tIrf9^$AcO-r&U{kwR zRD#Q`^T?{g1&V`)}>pQ(PNoe^>GhvdZ3hJx5MlLgD;N6+r z$uHj7Xm*)>{mL_K6fy`_=HJnb3bzKLxDs1|^SS*Ez0r91eWFu=!6^dM1sC4nJ~Ojw zAagPpJ-FVCFGgcIcWhL{uS-LB$r_7s+K{1)bfpcXhZ84)B-_BX!?1b_Mu5Z^wN7Zd zBg_v<%(FaAM;booxG!}dfbSQ@RjHpXc+@-Ks@9)Oh&__M>&Q(*aGY$Y%6*!Nb@a~{ za%D2d);z8UBf=2#ej!!2m;p;Sg4xUc}+Xu^h`Z+`qGpOeR z&!s>tWTxD=(3b+^*T0PZ*;5U9t$nV6)^G5uRDh|UWH*YLwBIoe>w~w~#;YPkiEU4S z!b4e7E&R$KEjX%K4Q|yWo1_98@Vud)YQM7y-1lBrW1Q&)xhEXPT(cdReX%7yQ>_Q< zy6TVqU?hweDaQ^8J@v+0G|e48X@j88tfOvA43pgJl=mW~umOhIcQjJ6MgyHkVel|1 z9c~*%c3Y}xA6i72TMIGx4% zJQV~+=ynu1_n^HP$H12(t?0Y!xJkEYJO&i*uk}-D1m;~~JcHzeaC@s(^DeV7l(3NA z;1?Xk7&{*Q{3Dh4^vuOpI_@6q%rEg1+3JVaH)&U25>lq17)=BEo+7lg+A?(PP8~!Q zUrV2x9>OmQ>*$0e=m?BNtYtSVGT<;Q!aq34FIl4!qam>D@@j}N8POERF@fFR&g>*j?!L&Z; z%QaNG@u?mC!p~EhZE1jwUW&P0#w46B=xIBhQ3*fUgHJs$&qIdKIhN-X$6$m^rT1K7 z7wj;-*4UqH2gmCvrFjPbA))#kqU7)HsDJW>|2Lv!KWo}OJN=(<{YRYI_EQ76l69Tc zSHBafUZgzOVK;_*Qyy|1IxvlU#X=-gxBS4EylaQ1xW_P%!sEn~$N7JKYyPH?a^}BD zlq}^7`3ww^aFDD!*LAuS9$guKJEdNOKb32eADa|Insdc7>Vxhmq3`>LmAMer9+EX$ z&?mw!c8Y`-HL$L|J($Jpj;>99jQy8?Q zg8R3{z>SZqCR^?oLCvl+>*a4MLH)2o0pGn;NSL&z)={#DI6jL;uGvh~`sSroOAsks z_EW>k;*t2w@mjp>n;NVL(NWpCHycjAuvOAcu7;QC3-j8Hxi}=;G%QP516*~Yasr)# zxH53;=tA9F5ct{f87Rxd1seGZ{NSO=@o3`lE_QWCMf?n5m1p9-8#*gsyr zB?S+Bt9!K*^U(5+qxHm65)^y$zEwY+1BLFP)Eo7MC|_(x=G#;auI``nt=g(UMreKaP_co{S%YfDfCSV} zo~ES^&4=~8HaCCH5*SL3T3x+g2;LRhC${w=+~X{GXh4?>lYw*Jh9--!UrJ{)M1`9s^G%-b(Dt1yoH$g8ogY;ca))%%R7 z=x5}QjD9c3!)|ZBw%fD0IQz*x^O3qKbX?F{*B~4+BJpNdD=ao0o@=MwPl@!uyDCsI(=K-i&iJ{LV{KLw1qcA_nfd!COV(V3pV%4@$;jL(e5 zZk@40%BGif;b+_A^f#Py2O?@@>AQzthak{P;6{4l>bng(N(z6w$r$pwk 
ziff0m*qsbe3QP$+&k%}(!-JPBqf4Mgvh7a%U2ft3tXIrtO*?C0Z?o;Kw*=AC8d@^jG!cd>a@6(Ew-eus~6RlFg!iRD_CV{)U`49%X`cvFA9o9P%abzmU$ zBkZilF4c2*^=T>0@sDv&_~!xTaN|kist8EEcsNdTtpG`e-RaL=3SdO^OF=|SAx;IJ zJ{L@p0Y*1cZnBkCLTr}AjcWqLV(`MTp;^T|I8yz*rPy0R2HkQ7t`aUJK{U)9Is ziNTekg}aq7nQ-ads!k5Jkl%X57f8a1J$KbzDqY}M)fI;bQUTF9+UHWoTn+aqa;$G} z*2A9Kw7CU>w2AxpCu#I*DR^bv9jkd{g-LrFpT&0P;P$k3J#pgGl7IW@K>W85e48`J zLwmCVldk9;$>~pno)htOOtJ6KxT(PSK1~DWEk>|)1|tfVs&rYq8RK<#@952yRA5gY zoIA)<0iPCPMtf;O;jYJCEsD5Q5Oe7|)%++Q=mN#Lt8Ws9Q&h{;+o;93O{2pNFA-&P zb&fc2r2}U>ljZ&1Ht1VulfLXO2r!;gG1&nO)_1;3u0G%pO z(`(LCBC_eUSsj;M*^^MixQ~ZW3&Gh#L$c=T)u6*R_R;-v5z@c^#cgd8jTc|}-JSIe zgNrfaI(kfXSh2o2w^btn((eBl8NcjP%+G~5he1JU% zO7oT8xxhx1P>R~t25d8(8hk`NPX~sMo$2bUgdxj4%yl{qX!LWr@5e*p`n)`5xkr<5 zFqDXWJ;6|j0}OF?tj`J{Y`XEv(b-Hek}F`Gmn?u=+7p7j991yS5-lZ}6N9I|n|(G* zBM2j9Wuy~M!Ur$JoPPZA1f>F%ou}8!p`4{HE=x5JQ(E6hRn-ua$yE|+UzO#-%l`d` z>Fo(v_MK&ZK0y#vgbUyEr7FfvFY3E*UuT0+o%9?xbsShnU6)obYQQK;*N~USzTS*fWCZs{$W!YFpC^gl-bvT{UrS_fs$qT&ijVav&C09T2)2Mem(_n zPBz^x`&|TmO^aIvuQ)=<>eh(~{dP?IvuGE&S&OTZd#jRH5iUm2IgI~^K$@)yr&K3> zaOZvYuhhFrkQBC`_QW4OII;0rnzgAAmxkZ{Q7EoJYx~<9<`)P7V5{t)TA?Kx#m?=hK3TFUUfgP0MgyQ{Lv*%~W*g2)Ls6j);8Ln1yjyw8FLGHQv=p z2dFJCp5)xj#un#{<7{J27_sYw|Aie7;akkFN3E9|;DqQ7y4Y(;c(1@ku1u#CE@r%! 
zRg=ia6du}Bj?yKd?oy_&QTG7{zB?t|A}@e=oS7@nNx?~pgYQ!0d$3ygP`}7JLh7pK zM$i-qI8QNOh-{C-`^xd!c1O!_=e0_;mYr#^hj^#YEvBQY&xMvnk6bLjQDNBoG#T9V zli42%7K7Vj>3+BEC6MxPftj9gwe&R^KlMsTfiYpWmi5pU6t)Yi{^631BB>vve@r57 z^B*qRG4Brne(jEn!_AOAU%q%XwF+lt{BzmgWN?rM{TrIv;_Gy-FFm7M0k(Djo$*p0%r)|tEkioWP2APGdL|M*1{!)AD5CJujUy}91Z(kk)7tS{`dPqL z_HE>&vJcEERuxtlCBP0V`88g4eN>TR49_Erho9=FxTw?oa4!c7z4Cw>3@=VkB(~Or z>lcbFwct`5o#tR%Vygslk36S&`f|K#FfXxJrWz-m?`&9kehQ~H61ftX-xEk;&AqyQdzQDJ1F-lB=;}bq0d!Y&tpE}K>HwCi+!LPeK%>c z9VH{-*pmZ1B-JRC|8<7kjZ}ya8D%uDzV`*S-O~N9T60k4-JkbUfBbNse7-%0DiL0a z%MYnY5ajL|{*L@V5s*`u)zoVn50QgA_Kh**WBL{FDs#_9lQs$4QwPo(KhnJLZl_9G(bI0LN z#I&shjqR76h+K*5E#Dc!nIg9=*y*|`jiD#t-@)z%C!qY;T&JnIR@cr=W zn>@cLTyPZNb~ZY{ zA5L!Js7C?<+f6}ucYn~N3^$2|!sl1~&X0tPfHee}0*na~BF92}*Jd5G`OFgox4co^ zL5D2)RUNe3>@)MWA*L{1>7u;NWDJ*HzFVYnDn=2GK&|0>o zybQjPSDVz?7D2RYpI}55V%Emn^V>Y~@%0%&v5x`eu=R@3;b)?;=vX9N;AL5bT9w-? z%Y7^0TYa>}zV8mG;>h7;b07+Hp0Lv%p3eb$hvB_rM1#+T@i#Y#sRbve`nER`Tj&(} z;|_Gn1bLP2Bm3iB7n0kk%E`u-L!foqRRtqU=-G04JGBJ~^^@{3HZ~u|l^h*7iij`* zbK{`rD|0;Vk<%nZ6OEjO-*QLWyiqYZfcj^36oe1BemTpSk6*2Yk9(G-V}Bgu!jzQ> zkiLIRBN=wW8%%OdBQ&G*Z!hRrs*#ZC=qvhmVm*DdU zrxN%I9BAM22_TojCliPA6buj&lC#`bjiI^;SEL*2!0bstocd4z@QuYVkQcOp@4=ho zUu|vhs9wcew+~qu6K&cfY?gs%N^M<#$N8YC{N>@uFX=cEld8pkA`dRFekW;BG!pxj z%rA3N);J+jFzl070#pHta#Z2v=pg^P;Y+0#sCEsHsyuB)X4f*BzGAIKt9Mj8HBujLMd5TOlnZ`D1;Bz)CFFMeaX8aB4P@2mdRff7|mq6?9PR9QSK zvI9O~{5|lGr*bH4af|DiWk$79)A`yabKDzC{LkMzNEKIpIyisT8 z1^M@7O>mg5McR=i5=0t1b13QkQ0H5f!v&c}P}qF5TirW}HH-}`c?qXHh8we<^r%fW^9v)Y~Vb-?o@cCCycS~%X7 z7#*?5#jgGBI@POH`0CT}1L^Qsv>|O1zCkD)c8}+dQx}xMP6@fQ6Ep3QbmjE^=+Oi) z@|R?)|B;7vyX>D9pNK?$!}UXRP=^YvwEKR&%0#6(&sev4LW{ebbc(mZhnOnbkmaO9 zkblL@yw(JX#mz6-rY%51(V-w$zqWkj7W$O=_-@@_-LAi}V*Gm@;$IX({5H?Jgv(R_ z!TiaWDYTxqRbw~OUC)ic-H%hN}s+E=E7f|_tSOytnFpi$g) z&G^Hv=NYK;RYj%PAOmlt_$WUK2txUbeRckxGHVE}#u~E(sK3W%oyAE`;%kTM@SL0t2 zC7)kUt3ME(fO&;Ty)C?>NEWHYpZYQnosA=6-m3KBGwKTmb#GCX|1+ZG%zv9GF&=je zdTdjOcUf8}b#=$#^q@sh>X$mO5&q$lD$@@OH2T*}9+m>l7yi__GkF+whmXmKtRE=J 
zww~2*83XH*EY=zACg>ecQAv?V!}RAO18<8OpzaSx=V(+d7}8>!n?ePep4-+te6$NC z=l#3o`l>-xuJ_mXqE4KAp6d9frUtjvS(ZO6ZNoK(hoM5->p`p7xluJe9~6^@+2}nA z(CmS2q_%MbD67Tpm>=$evaKKPtXWpz{Z-~CDXzizXhpy|)}j*(Ofp^-KFUOns#n%Q z3f4fXF?&?y(+-hh&x)V?r~|RFr;A78iT3h!*SlfoTA`0wL@RuIA9Vb9`$*iT500LY z_p@^xfVqZqgLlt&U~bOV?W(s@;T^M`Qr4dom@jwZnAu)|TVu7Kls5h&TJ|?Y$-iHT z|2k2!H;3B9R%!zKvTiBM?(l@+*%Jq()55^zLE&twUpvgaIVCt1+X*gDbK0k}V)0f} z$o{V*<@o+Izv3osCv;?7@5nt8gW0Z$Moq)w2tS$DW%OoS} zvC?j>Im7wTYWlOzg%CvLh}KfQ`%Z`?$rDQDPUS&fsmfRJ7cFp!?d&Ul$qp!Qd-Ler z)*x)>yw0DdF#$iPKcvbn^?(Au+tG~3WT;e@9#?g$fGw}$^m|!Hi6Uv?(s71x@cbNr zwOa^3O~a{}%27i0!1!@XV`v+GbV}b}F4Tor#DZQwH}3&f7omhOqXwu^&YEXa>BM!` zf^>1=cu35pzTEdT6Qr}dBV!yJ!8$s&>RV?&DE{!fntY!GJE?jn^X40{q?vozJF4-o z$M0{5k~9A!qU6Z8Ef$<~gHR*OllF!6BRcQ7VV9tki{^r2&7YNmP^U<|l(lM%7`PmL zQ0rwIJjr_#Q%Ti~S3TbHIz$ItS6{z~j6@q3x)0>5U0ANsI@%>aN(lP{_Y?*V!^-|wQ1JLO z?xyqimS7>mW9B_;He0&k{#SB&p^z%fO1SpxM!gx(b}YAk5hwtK*cj^a@>)z2JWgNS zSc@-Y9|S!r&A|YTwVqMMQ9L6qGPUFSEL!K@WY;RH{_E`h4N>xsv-jU8O75Cm2xgA$ zfU4()d3vN$=*4Kt`z*8gsu^xir$oc<8DEY9B3r7y4*Xb>(N_1u4WgcOIhYGoAY=GiqWFV0EZ=y2vZkyDk55yl&30#Fu`F+$ zSBnoc>0FYmSkDDdna?vc)cwft`1a_FW+JWg^tQ*-^O0C%N+~n6s|rhGC<|@ag7GlB zje6Zo4@SN6(&g6c#;2q{sk`mtP^D4O_UBqV{_mIV-w-AL$BFsZ?W}(%N+hmn%1UUF za4zomwz;3qaK*C1U5>sHv-)S|S4cTHI(w+QgRKb#30Zj>)9%99512L{{$_T62cB2k z)wtKc4Ii%-sMeYjQNg8ahTFty@seldpKFpNP!l^>5~yEIq#F<>QSxs;{QrX}`47MKFOKT} zl_(+BbN>UPM6K_KfZIkM80p;Lx>Z~cV-lr83iMT&;^nI|Xfy)!n}6ohS8@^0I3Bmp zZ^JD`%H`Ue-KYgOEjVtB!D?K=nK79L%%?D(k19+6YtRfldy1$Qx`!4r_DsU|+S2<9 zb{$x#swV8YqaT%RCse*Ap2Hy>WmW^TuX)VrMN-k^n1$|o z_y|;)`I#P;DaK0k`kSvVx8gt%ncHWl9xUhDEhF(}6s6jQYc87?;I`wTyI*7{VH)3v zn9{*EklZd&LiaNr$=Ijg4~})fpJlOS#nw#t=u@Zo@_+}NdVkqyB4P-IHm7A`66%O1 zfoI{J<~*2kV|sg?y##KakCp#rL<0KPrK@wth+^E?{4}-R_z`8!a%nRHHGcW?JACZK0@?v)D(4sIA+hT9X*vQmFKeCU zbuI$_?Wdx4##iDK$vMLv!2^)bQ72wSO%zq@4;8Qw#jPJsXTwILiqKo>_a*s=5WMuq zQ{Y}=792U0Q{ooV4Osnx>tl5_z!OEaiyA#hZjrjYZcoH<^;)m49V~#&otL#X!CAk>blb)c+y&ECx?|lPS^7se!Di%RNhi)W!R1Qp+3V3w#gI$fflGVuiI$_X2e+oP 
zVK$76dpg~t>%eV6e@fdLyYWGp<|2b;4M<0K-VCl(6+w&0Eg(qJG9#s%;GG zFlRe;y64OQ?ue2aH&^JuV_BAKzJn>a<+|v+0&_b&C*LM3M`SI@HiFEi9&P_?OaC{8 z6v6){QDVyWa6?0osL+N}>|<0b#KW|^9~lb2z~2szbK*vY*bxxUYE;mUEfkF|)LW`B z?9?iU((PP4<+yJP`Bop)q-e6a+Ej^=+A6DaIu0P%$X^{Xm@nNwPK*$agg4KA*>}pPK*_JJ-F8oMp_p2mW4SOLm6*s= zW71O5KiPTgN@X2X>vm8+5o^Im<>{|nwHv_G|8u4#p}72Hd%yVQg(}dTk4a4DPelbC zvv2&m@$gk^AolI;I1En7&!frmgupl=zdDi$>%CD+f|+?hcREDIkA(zmLi=Wp5~Ihc z#C@!ciC08K@!YrM%xn~R`<&?jVPU3#%qcy-dSs_m3`T)0JoYr};jC^YoR5ipc5bi; zvJ7T^311*y0XykO1+7A1He{{#b#^;G2zYuk?qVvCw~uAdRuGlv?HAvN=9Iys69!I- zSdTt0Ec>rYwL>500j_9&T`+C5D!8YekK|GBa%jI+V54n{K$>tIt``|gGrVa;IPdS3 zte^;9%lBo{w;|3)ow@wepM>@y*Tsh>lELG$?6MdILao=H#!=8AIz?oUo){M5=)SBM zq2Ds#{&n(f9)eQ-`7Pj?Y-A}UaT;8@SVT-YD3gw!MiZbRBG%v6lfhqh{HaxZEKH<{ zj7G>31HwXUtBZ+<0Oc{?_N&{74&=V`y&K?+Pp;rkcE7ND?DDtCwU_4%}~qf_19}W4;ow??^RtYzc!7V{`g> zc0+7YRx0$qlM7GZp0Fv5Bcb6}Mt?LH3=w`mr9 zw<>9qV=jgxYogXmdCK@$!_q7NUNB^xsJqPdxei2MJZWwXvL~EN0vl4HDNy%NoiFT3 zKDrBRe_5td2mTlL)iJ)QhB4P{*^lOhXcg1+Ufezow(s)cU(6&%{k4azcu&T{#evKG z{A>|;RPst+n`#z{zM$k}AFY9yl#BbT%nRV>kR)T^S;82qCZHrfTY}=DKSd(Wx?_5w z|Idg&iJ<#Y^Qn7ZI{ua|84&I-08uK^G0}TfkZQX4+<_qzQr2U6qCX_z0l8-uTXbGw ziNsrqvgtHn41Os{^NS#1+|`b;8iCY4nOe? 
zM3{EokPK~3hd!&86PtP2cwSm%oBSxzLAz-2UN<2XvjuLl77G)5tMj%yo+XyR(V)8e z!?d|5o-fxH{W2SU9_*>Bz7q*Qzq$&&-`6_^D~K)L1qf@7<8`|GbzCC+i$6H!A|sR6Q>@@NEO?9DTMh@+}wKXuVZ4 zeM8~JmK3V1x<%kYI`!wfQXy`e8n+SnmWCxOXDjoi?V#)7p2(N<^>{}9z6|}@8oZR= z=y2zR4IX}8pLm#51?)1~Av*|cxn1pn)$=%FV4i$yUeQVpWKX8|1o;}nj}Z3ihCf9> zHFacq$7Ujm-Z}Pl_l^t<4SpwRu~r7BV|a5bvWP-*M66LxXfpJSG;xh+7{ltyN=)+m za2bnt6#ra#iJ}5b7B8no`whUCBXu#opPaH}x>&OGfsY zn3g0XqEmd^J{SCq)!wHh=3pv!b)aQx8Y-=tiC=w6xT}^9YWJT);O5f(W3ur9V>^7e z8YZ;CIg1EhsSzS&&@=4}o>3^(ApeX{C>b{2H=0qt$ibf@YkBsc3b8#W@68Rv8e+b8 zXrSP-Co#2dfhm@lsV5aU&>u=X zZMVjt-autr+khL225ZdF4iJtiCW#x5Q%itF&pAgu>mAe!cbN*8Mc|0Ri$8;};^0}5 z<#H5}nHLtfoDos)0JEp9W>j63$noWBoFf(CSV29BbE>ttnDD=Nd+&Iz|F`cyMU+Zb zg-}RTW+JV_Dn%k0Q5hLoQ6dp#XU~vL_TGCO-uB*mWMqWWKxygsImCANNOXZ5|hqKKFSk!dSr-z0Y`3@IOj|&)fDCKJ+09O_vw^ zOs*6G8D~&#LQp3`n68)?1t)^Aiu-WJ0MRkF~^+wkwo3_r-$7zQBg*uwY#3 zomio{8wa#BcOtmFTG6PEOnOg39@0*5bh*E+f=55%^{t7NgjFA^+q-;AU{Cba#OT)& z^e=z4meDJQrFJP{2Y=>)U4-M1=BXg)T*){@eWMZhMfJC>RuP-JX+gbbjPW?bum3q+ ztqBG+orQ8IYT!~%g63791T3MGxac7k3oQfT*L^P!B2`fR`G%Gt{2J9zD7KV=%yBoR z&upoHv8B3G18&y%Em>7;ogf|fVs-nA`;xGoKBG=sHxX6e&7Uicd=BZBcV~8m`Qo)b zHV14qh|P1yE_}9Nq=j;I}ILHvGYC5s)s-4jxv+4dE)-a8?1Wmj+lD=l=z$94R|B? 
z6chEKI1Cz2zw%2b2(@({Bqa}*<5G*_9=9tUu%NeZp~)x>kMsU=;+V~Y@i#&b<0xAp zX>?JIg}wuv{k0@>^GR@pEP6TVY!~r+u7K95Byd;N8X2xg#06=!tGzR+NPSgzGl$3% zkhqv$kjdbWqXDoRtoY+jWNjWfY<}9p$6;iV~C|zxV9m&kA&%Il6nx z+-o%cHYb08ofs}q+>#eImxgyURknd+9QrU*e zO^;i8Lzwc(k)_ns z94=aZnTX9DQmM_s3799=SbU9em8qQxiZQrMkSN@y&Du-3K((`X2mN$B>^{2R;9P4E zqy#MMD+AVCtNd9 z7r$7N5}CuPbzRDaq#dD_L?{AUSn(&Y_}8Gmg7n9_?b*03`ULk0i+o@#C~W_=l7oUaD@^-$ zHN!8{cHL&vYFM)y+>sKI2YWc?%CmpG0+~Zi^B3Y;;2^mygW}I7$Wf%Vy=R($^lU9f zI$bqzL^#yEuB!^iVq6W31%q(i#`hpU@txhG^Q!$I@g0&A66ZYjCjxV+YVSY)W(C(L zhqJb}zXHC4Z;y# zpFE*F&FfkO&T3&zTX*E3pVmz_hL%$B6i#pWDpUj=wCnD&(?rCw)9$%hqJf$gE-P!( zS%Ve6+fJB@JwTy;A(<00fmnSq+=J~!E)?v$T5P({57-&$n%8f(!Rhk~JA?la`w)A< zjIGy`AgsyxW^QO6@a_V~62l{*1*!>Uk~eojs4c>!9w449pCsz5r;mJdq)i0)6A6i?J-LA5d&)dw#Z zc%M;Ulq%bYO6Db2YmT{a>e108lcj2)II;ii=#o1YKPnG)*oi!J?uhH!O4`v!VMU9F0qi#HUqQEU$QRqmV)`I z!xELoZSbzJfIq~c7B9}O+T3EQ1|g`Rtskz(Gs!Q@G&X9nb(Ea^RB#&l*E>IbC}#&Y z91N;FLx>+QuJ+dud$JSUkF7TcNyJ?AH_Ek)cqkv@TBTJ6^#?`&ynjv^6!8IRMd?0~;P-oGJgW{3_s<`4lTU%g^+kEh7X&#Hd+V0$ zN-42{V-)5ihGSZyj&NGjIJOLlivHUP`Y$I4hN z01++C$A*>!hbeqdm(*4Ys^^5|4#o$g7u~7KQq2hD`{c|utyTpe2iMI*3r3-7!Tt3n z!Ld!rh(*~2Pl4y+8+HlL1YGEkS-f?q6fgVVjeRy*f|icQ?RWf+{FhhbUlJuI^KbLI zJKo||?$PB7#8volAU13uyB&TRtY$j#d<3akop<|hO~F4SN<{w0M9HT0(Pt#5HuMd+ zs8D*Ggcaj#r6z(!$jK&boatE!SM&FmpS;xta>=s$+iuKaXQSNRkA&0Yi!T@b^v}1j zU_s;Y=4UxN*yq=iovuWk$^qAh%7kp}zUYGFNINY3;xOqbB4I*m$lB#wzEFC3+ZEN7 zG_>vvsL&u{i%VYl?^1Il+R6T`K;uBv619I;%J=u;1@f|nYaN8#E9Izef=w0X2xUzK z?XG|~JlpK=FLyv(Os0iUA_=i)SIG(Wm!gT;c;5@E;R-XNw})_8cy3EYR6k_d zeZDy4OZceGHuCo@RuZE!8jq$s60kWnjB(Ry0zbr)e{%QvhZz5Fh?4)f690XoWYJSl zFUO`21BYCnc%`-w*s_Q-@0%`==~$>;muf*vQ>Wl$#u!LDkePhix);fp(;wQljX+U~ z*J{28qGsHYl)JLT+iaFav;0vL6n5UYkYU}7N8U3JHYBz|gwxgcN3_PU$7SYbur3L& z>))|gWosj%d}71D83bdn2>US!wYM<6yJh_&wu8-MJ1&^X((;d3q zXTxz;$i`c?Faf-FJ)TZ=?uOAT58a&qjpq49GVhvcz!45ls)*Y)*k1fjxL70^wtXz5 z;fQat!oQ{0&|04 z?`0<}PT2*z&#%{Radk#>u2bS>9|$W%)2HRjZLd*0Obh`&!3iA>38Jh#g-(2*L6QMA#Cvbc5P&N8I+sT-;uJT(rR?}7HL 
z%NLlLdqA-J#(C~z!^jiYoAUV70Onp8Ieh!XEbx^d4|FM8#Nt-);da2@6w| z8y>5hSbb!~;X&Xxu#Y?}@Im$;DC^%4CI5cl|38S5|8%T>@vHt{i4tNx_kTf@9DX%3 zBqE%PuCKe^uByF)FSidxtTz^byxu`+suB`tjd$xfUM~jD$Px#JvQA7b928t^>B2eM zFQN?3`#^;4cAjO?mS+YKwA$lc!K(Y7AEciSW7jnM$q zhf{osecXd4iI11`qDmo5JQ;39Rf1re(GgF!ezd&uvi4$8JQOH)(6gyX0fRpZo@({FKqdFhh)5Dep>g^eGUX8*>pcWBT^_*vYgUQoXclG* z(3AUCHG{&=v`Cw?B%nOvd^y0=6=)`ZXI|~CMyt%Z2tU1ijMFWbyu=WNj?}%5zWtr( zdH!`nyhaC%+$ypplkWzWD;17o{tX}-+%K0LTMOUzH8SMwt^-Q13Jc27ArSvzXs(bq zgu8Nj%J)@u!Ma*MZ}Xo@qKIst=W9nach0k1@m`99Fq8KiEOzx!MfM#PSn8l;-fy|f z(hZ)qziHVaR7}vqwS{NSx&wCK-;{FiK-nxy1I@3c;P&AAuLHH!u&6^=Tr65pfcXvU zW2-K7nrofPnl6Gz9=1wbuXbW(_{nh7z1jGq;GnfH(RA^!7qQPA%Ef%jLFj%E13Z^r zt)4nR27kJKR+B9_q4sLcbH2!AR6m{g*@QD6twxQ;!eUFX!i4V;o7f=CmTX>n-;_oO zaW~Ao%b((pC#J8uZ*;@^`Vk4yj$C{&op6ApP=%!JQhQpgY9RGh=pG%mTx2loQo9iy zkC$!=*16G-K}KHh_O`k>Sbb7$t~pl%%E4JeT=|5PKl=@L5vdjO9{Zn4pC#69d#-ca zJ!yn3G}k7>O#9%o7#y=*4@D2^d)fVE4iKyD&HC<2(SF{cuFVd+)iyTL}}YdzPU*@H7VVgfF6;~*Lsf4wfP7OK_KJQAJ}>c^_AKRzcb zz`y^Nqg4x0G!Wf1Nj;qZ*Y@gfh?4*GIR5KT@P8pn1n88O^D@%m_G+$DByS~<8(-#f z*){@u%$~}28w~=>rSNzAD&~m@5Bbujky_MRjWOo7NyRDU$NL7VGtj`1Z{3k-n@;v~ zzh^ff3}Bh8MV1L9l%R2Q+qIsJi+j?nj~Qn|E)V;Jl|?*s-lQHc7a726m7#jogM*O! z_{poseo3gMykMbeP!8{H#Xg*vi-p1aer59b=(7b%YPyUhTQD-$zoiR4DBt$5 zmmmg_DmhLciY+CS(lRmcQ%Nu*y}wGVx)TSKf#-Z~8B+6eZiwVH!ci;k8xHsC;rglD z%#}N9FlYSo;@#VspvNU>pWstYM4IRvE;!hSda3^TJGeR^PAyP=tB&+eh? 
z;TY&TV$yw~10G&@a1o9NqWfndhhLQgxUWs%c=UB*0G8&6k?*hO8)MS`X^ube)UVb1f){`>sn}ac`!gE6IOrX(x~=BG zRJy-u*l+wbO1b!PV-`Nn6J2|aF`$h$;5eb%@Yl`#H-(f_|3jigewJ$c7)2QlaVc^| z3)i9OXO7_ev1PD2TCka0+>Dj#C$?W9ZngW@9|RV~O5*d;53~Wx#G1g{e)|B?;r?hu zxml5busr0^d%zty5MxdrmMPF849R-@8g;(gRS9W zfAnb@Q1SBE`vZNoa0o80KBj#NbqA*h_&$|Ffhl#>LxUFR(i6B6niB^w6G~FrUx>Sm zn|X4-WP(_7O<~LRJg|r3$}RQ{xKD=sNn27Vx^%MG{Isn^$AN31PWy7vv}y3G{@4rP z)RM~_7fQmNfu@e(K6S9GpeX#ywswpT6^fiWPzO@4)6d;s@*`3OQkD--=3rGEJqsNb z37bw|6tq&UM$7EnZ^cQ?kn@Vq?e;rejN_)4>mxcj?gqPDFBcI6&!ZmF70X!IQnOHW zVz34hxCQdJ?al#{Cmx4o2+y2^=AOOK_QRjtVV4G+gnUv=t?a%V zng>O#k2v5B9mASCy2ViQI3xA#ObYx+Z5;a%m5wb3(h~Bv7vs_EpLbqpZ9@9o9Hp}k zM9p)@UFj3`-pE+!b=R$56!R>cvo42b9-)2rSL30oWO$;- zX45BbhsSBfww^A|LY0&5R&I$IsK&dR=e<%xG<&~{o0G~wepJt9M`|-9^)_Adm(4(l z(Q%H0TcgnV$q8GUE3c73e`msbwssT{&e$`Q-;E#t2Ckg$!^r>+~+jA~7%?opc* zVs&`jHr+GjIDI&kU+HWiaFBe3%=cH|aIKDk3SS*OYP47@4HE)py=PB%j@Q8KyYOe% zmzz-P%=7CH*bx1UM@@~WuVB=ll{Z%R^H4`}ck5}fuHPcC8Vz$qrb^?8+w1*tX`8K z?C1UN1t2@tt#vyh0l$fz9ucW{jXP#HtHc!&usGm{o7YEz+|X?j<=>wO@fAKQ z>K_|XFst*GU?B-;63^NUhjyb^u&k7pg)11%TR%DXz8@Om4fgpEqk4>ob7SPO1;j7$ z>g!P^V}z(te6h@xX0N{@kBzicn2duyy=>F3_1CR3H_{VHES7 zn21A7puT+~V!ELag~>MZ26pR$vVEt`Pb`J2364f|#Pe?{5w!1zbtHDW>T6cyIYZt- zM)(l@96zupGsfx#5}h4eM*dhr;yMxHnNU^&E?L_*=IYAvi5<(y&5TH>7wR)Un_hsf zeix+cS^~kB^YG;Ky(}2g|9PifBoQ{ma_dhOmLYq5Q$fd{LO5rs z;N8JXM+$`E(R?1#EKcU(icMFe&3reAb4ll)7bhXh4&;1(m`JhFa`7AttOwm|EEL^R zwfOk#D|!>6lirZpko=~!4M-Gfhm(KALCIRx?VE%rw9hmnYQr%I%4etQL&F-eG`ao2 z_pEf-Yh68d)QcGF^KQF#bFl-&s5u{4mX<)He5Ff3K@O4bA}n(6S3W#96uGd{Pzn@N zlS|tJ8!*Ro`(>51Y77g#6%kB0#`a34uiNb)+*b}Yx{RAyC>-MdVtjWu{!-p`d0wXt zDk_W|0!)j+xq|fQ^I9#OXmz7b_d~e2$+}utl!p?Tl}s1+HbNv9qwmc#$#_+B zJ37xD3?r-0;-3-@Fpe#sE#f)?fl+5e&T_3C{Y#t0&Z>37*I!M{yq1;V=}FEJ@x~So z7#v`izDp3Wj;4k0h!L;U<}dd?4wRru9i>H%X&Vu#%6CaHu^J!#VRp6-iG!0XyezYk z;pp&t$XI>67>1sW9e6{G14IZfGkl70!t)RPLR{~6g2&1E-)okgNb!vBVun*A8V0?x zeh~8#59p;|s%J>UJU$mnd&x@7eo(E}r4b5D*XWNK5vdFmr%mj8ZyyxrF3l}8w?sBK1}K;M}BsrK!-jXkokIk+Z2%s 
z;^Y_ZapzG6M3`1*?3{~(CsVF>zfb34Xxqku@2Dmus|{_jzSjUWG91R!T#d+#GkTW{ zGa#=gbMX%0ez`zTB`TJdhi2sbmF@k+dy8?86i-Aw#17NYliVM}ZN|J%; zD;tGuX3m=5GUMQg`Cm+Xp*MKmsiug%_82B0*m-%T&I;wT| z?s`OMA^!wDb}cKa#S6F8dA3|?gKhQ~K2^06=^a(uX$RjC0{7pHbPBdngqyBVF1Irp z+2D4|Ps=RCL)={Kc?~d;&;RT3(^@=yp>~Rj>k0C37m?{wltO?U&v{PaZg|PLwM9XZ zAeu;=-}&mi(B-#~rUNMzIDZGMloIZYtld-BpWpKakHMZJ<+cr=N)n>8KVOOGS9?#! zvDd)j+jAoU5e48oP9~bSr3p(MHAeGx7J{mF`O~JaB`{DdmUtl|59y*IkH&(S7d42n z-4lcPR+k!9K8Q0xshv1_^)qQP+1Q=fl zX`IY{0apvh6;dhkVc+ye*9bBXAc-o+Xj5CE+#{ZyyJt#~m5FzNR=g5i$s0P1%+m1N zU8b=bEW@Y+p&j3ej=Fe=qwHRvE?lI%VwiWL3MIvz+z)HEgP_9x{>jB);OwQR$jvQ* z&=MDBa)H;dkCvaB??o$kNO|>3iIhX^)r&FYrIiqIF1!43MIv}#xDZ~h7zeU+2P{TX znjlSimFQjNLwlNt`R<(rffqoTJ0n_x&o(@@uMtFX{x`!Hm)DYz$Z(z-JK<-0W4jd~^6WvCTuYr36Ezv+zj`!7#a`Br02G8dH?O%?VfJPb4&48w(YhrX>@G^74r zsspjpW!NbBsS#4+;P~r|Az!&DNKv^-B`Vzllq_N)M0XeCtetqKOp8En!m!Iqt{dF0 z=-z+ZQw+IxQG3xe3$<)ZCuTG3!RwDOeQ{k6G9)JYC+#8u-PRBMd7s;0|MLZl9D?vQ z5;tbpI93QvRKM0;=i<>Y(lau2524yW!qU5=u?iQr`N^G#?SpTveJopz3W<_MZb9tn z9Q>Misu;hRf`yd%SF@ZJSP6+~R(kdVmZRlW_n)eOX-35d(uu{e<^5#K+wy9V$~`5T z`7s%%B%2l=cV@r`=HBlCnt}K$cp;!*fpFXHc=CAMDh1Xo6PQ^()x+b`gckYbG90_% zv2|B+I#Qf{$nI$0v$x*A!rG7OLL?jz|-!)fZ$LXZkT=|K>Usg)FCAJdH z-aj@glMBYOIVL7{j}#0mi;~t`%?7^ntA`FpwPDZ2bK(0Ldhx@jn?0LV#dzqZpb2w+ zDc)LpCv9YiD4(@LH55{ZOQ(a`msrDrmrvkTmQohdDX*vfnz>X`V?iOa#-zURq*0FR-U%%BacT1J4+DMPK|yyw}Xs_T2M`!$2iJ0e$0IOh|6r z2Ie_<&NeXWDUn90-v7sqjwlBHFSOuqh?4)PL;QA1G zE^&VY5{~mRMh5e6O;t_!s^M?^*pUAF?4KFnT#c~sE%t?NCV9h}4=4Ys$^0J*A^$C+ zgwn6;txqD{T~&N zDnO{yXW&G{G{H$djxD$7!y_737z4?dp`SS{&oU(j{I*^`L<-Nt8+#ACSX2)~`5h68 zMvp;E4LRkqEv^?U^H;CN$jm??JwvKzPY;-RzdC=JGX-*P7lpE49tT;Wn?Gs=LxD4l zD4oq!fXFoHjH*IDPXGXn5QPAe(g4`MNO);}Bcx5~OM|Xe(aWdOp>Zf!PvMn74Pt6hBX+4^}#cYW1K?t?X z@)Pr%9-R8S-u8hWo8I->yHzMYFSlQyViHoH{Wi+paKqDI)Fn$^5OL0nh8c%A5l^Lp zA$wtxyLk}E|HMgLUqRq&_BN}y(G0V7gTJ}y0)STHBvZ!mc)YK(HL;4eosctq?rt9* zhq!xv>ROJWc;SWfTl4Zj5GXUIe7k23rL4@SUowbq1cz)akSBm_+lyUJc9GG34hE_x0gaF7(a;nYokPu`PWFV$Z%z-_Ar${_NK(SOa#H{TKXCKft^M 
z>L*$He$a9EY_5_yAx!i?wZ=^`h9VClIrq#~U^Tm8q@`aeR9V=gR zM({PC@(qQB4pffVrSYq-58n%}?C$3cMs=@Z%6l!ui1-aQCnJ$Dcp1o7cBpv(3>(>p z5`_maA!cOcY~m!wo}Kc^r}o1AO24Li9<}2~-sKUn8ef1 zOS6t!DE{?S{EegJ)c+Mx^0c3!;t0{Fi&EcGctE=ZS4my8odR=k@^?VG2t_S^*t4T+ zPP-5GKT^--sTly#qq-Bj>bvptk#CNDSy`BNGpy^0M-|-rcqWp+{tc8Yw0F=GTcwNg zjW+&J1cx2kcU|1w2Z{cf&uC?OP^s3thnLuLo}98Xc~#N}4YyuqvfQ3P9tULLx!r=i zvu`{&1d||K=b=R-M>R}dU0QQ1htCx3u{)Q;|$JzUz z6D3nk`}MvL#6qdxGj$$vA|W_e=MkxH1gW1c@2Wob4)kbaDzBzjW2&MD&6h{>=s8** zHzV5%4qH^*A2yL-t!4j2jc_-b36yDPhP7hVxjjd&WDlX_?%A)AE`vBbMRvs_tOX0y zWCEt_n()S#!WwQ@LPunjxRIS34H=J@MOcmu!krs*!8?bSP~f;s!SZq&9#r_n>Qq#R zPxxc0ls`4#H2VK~#Q%mU`QJ{=ziwy! z2T>An_I-vV6A4C#CWEZSiC&09Ba>|DFml{5-sCbM;-HyWNJ3P@P|BZbyNjw5Tq>mM z1oVo~PhhNHFdiXJXTF2-Bf$@SmuuB3t^hXPlVt&K25`Zw$&&X(0{Y}I#)v)df${Ut z*;eE8@K@@0X}`5WIB9%j*SS`IJoRD5g!cY0Ms8hosMhVothd<)8jT%5nEPX9X~sb7 zCu^jhXElz^=?11pmV#i9u;p|4VyI_dsg5M{6C(#CW9||@h5vQe{0&j^&ri-jS&;lY zQBvQ3;3W(D7730mhbdE*-(nMuvbF2gR9Kah^jIWT@UGgv**nl&BWZFu$oMOuq+&VkA$D!JW;;{^C3wlFDL@!uv&RFw9`qi5~z<*_%Q02rh#QfOqx#fHpqz8v@ zwQWUw_2|(_iw`q6bwf2lBD4uBw*~b*>W{<6gjM9t@jM(He!(?8*h4tKI{LZa^n-DI zn>VFyE<8BN0R@CW@yZ9PEfg@p+yL5VWg2@3V&u)njI35xA24Z*J6`4| ziez%ft>Ov6slWTOh+cmXN?xKEh>jql+*4bQtfY_Pq0aLYM%$_(+v;${Wr<SWO)p-MsZ zU_n_oDBpeal}DllEMFE+#Qr;h2|{UgM@3Pxc{wMCT@+0@_x$RMz}xf*Qua3;+vW|cWtL?KZ9|kb)dD`51 za2^UZ9RE5tUkJ^~3+`efRd9m5wN^RT2U;x7guI{Z$I38a=UDSDtBIO5_ zcgy9YOVJwr4&_uh%=vw2ro0=DmwrCf;4p=g`aKam!L=aD8sgO)(vA$OwXYRkJ%x=+ ztp(2}`*7W0gL$~I9+lI*xvIkJK#>1#HC@**vh1q9af7-AOCt_cza2;i^K!$G3zO}r z^2yz>IH4J5)2}@fYwZN1Bs1ox{$t>4OJnlBv>jbU3xAjG>jkN#n%~1_ZScsOyzbJ` zT>NSLLxMK22SSoQXJ#$cpvU$R=ZN-x5J@K6tMob#jQ()=84~xghuegX&(C^vREoHG z(>D))lx~sbog|8m?^GPiRk{fwZ|*$3aWg76Gg9y7n!q15+a#Yx%C^2A(ejcOkR5}Zgwro^<>rH38h=7kJJ}J zGIgeO^G5`umc~A|nr`%MN~v@RAVEyPcc~{nNq8*qMTc%?37mRzQOGT}5#HEwOjtcH zLe{;szbG0=$eFjWhy@&0bk~V?>}#1dhc*LZaQ*a*R`QQRXkRti zX1&&kM|Aom)GaT=7mYN@zN$fxl;P#{RSJRJn8Mr+2Exj-v(82EHVI4y57KwDv|;b5 z7|97G!oaEFa@%0mI zZ7w?JKC+B?Jx<&}*Ci)}f-r~9{|Px| 
zH6(-!T(?;Wh7>nb3z=7Gu(d5QYH*7qycN!>k@alEy0$ho?t@j>aQNN4YB1py`(kMA z8(Ir8&wKS^id&#e|N0IY{U-QHU&-E-mIq=tSf2{KehL>14C^WD6QIwisLeqy9)1j# zIY^8|V?rQZ5E)Yi${aoY^Yc~0QoU$5VcJKCymSXQX$YIRbl6%<2&FFORfzEKudBc- z<3&=p&eWqzbnIc#Q?;O5_H8xe2$81nagi~^Gnoij>ff*kF2pwu9U%vMYf!3Ii<+sc z8XA^I-b&BZVy2tAuho_oSmtT=d1hV>hon^PC29D@x}Q+m{T<ZtSvSrGSuh?Rj)kb! z?F>ND-q5l0|JDh)Y%kic?+cuq7RyKd>QU#l)OWepB}l2>OBcI69WL~3>^?S~f$RIy zuY7)z0XqbQoXCz=;B~{DX>t)Y_>K4I$B_rINZWLjGD*H2)yc2c%x*)7-0^91|7rz@ zf5`tByq=F1uWjhXj}cLW=LDyUZ&kz182h(DL%HC6RkZ3kZy_dSOS$aQ4ucONi;I4* z0`R$I{s%L;3~b|fdfGew5>oMY9pBFi=x7W#v{1-_(Xc|Qm4k)Ye=YVksU#beBJQ-k zen5DU9rsPJoUR7;pOcywN5U{kw9_z$xUNrZH<^96HNn<#XO8Ww`Ix}T8=!9=0`l(O z(^u&o@uZ-_k5@ zPum!)C6UlVefO)OkFPLrFGED8QxSRvJrPmeEXQ(*xyKCK%klM>OM8538&J$ptu&1Y z;iW!r9`ezm0*5r-91g!6j3-uF#~Qwuqtq|egb|uz@Ue;)-;jHZheU5J**3oh)#XSx z`ERKh<3fKmydoOhl0@jDtx34@w$_xkgc$K^`^9RWkppL~^e#&C6Alj-F6J0b!V<3G zs7kk1iPe$Uc5#Tc;ZQsI=x6Cy$i>4hr}8TvG~zEu6||F}J0LwBtts16xEEc~%_iASonRn?TMZ8%1+Juh^PgaTSyWnZ4mg-5$$k1nuf zVAIdSG)L_u$ea(oz;Y}Nw0VcF(zRB>lm*kkxq0Hb9vG-T*YV5>Jgniz+L>SnIIhU{ zma7%dRhtxMCe`DoeKK=R#T7UxPHL_4tcK{fp}`M$YEXoUmapt!3@T0K{pnB1!_E31 z;*C6w@a2fKRiFl;51Zz=XYe`=C+{oEUgz{cB@>sVnqUV4w~gGtjfsFgO>?@B>?Ts2 zD2C3B8ztgCclx_lM~D%?xg`->jtVFY471y*S`T?$W54V!ghB(?>91Y~YEhw6>cYom z64v;ac3O9~fsDbF*-BghM7FDNDJqshNY}{bDNh&3%4g;3dK3v%5#&z4?Oy^}!!I|z z`>iOw_smYlBo6`$rt#nUtOzGkVz!c;7KWA&wDZ!8H! 
zgq^0C^L;RC<5S>oB0YqT=gtjZLhoi!{#HlqMhX&<_C*05ahRkYE_+;oI9~|~F*Aff zx#Cjl;Cyof=6!s)F700pedg(oI>XiA;w3zoLz#!ZoXPdic>5vng)zU`?jpPcf$qsM z$*|%~l8-%Hj8ZS?cQX?(?uqa&=bR5^FmI)4#CDZPVY?BV*Pzsd9y8(QoY#8cJ>M>g z;SoomY2e;nN8r`(?Q$pU)Wd=9qwo37#3c9-y6(LvqZs^Tj^7X6MRb(Hnx1dVBGgNn zKfb(4jRHT-WA#rn3PAt#cSi%qCMk26mwA=bQRW)sBT-f@ z=x#nLV>45LK739C{tEd(h&Oz0+^L43@U`VSLJD{-?95L=pF}7-G4}lG)(GHPfR|uh5>C(<6g?%a(I5{`p!@zPpJE$H-1qg3#F>&+VTdovC%48-$A?>#@V=i z%0If{VMpH)SEGFJ4!A7&ep@BxyKQ(zYL)@lNhiMN6v0T7A@X~5ts1%5_6C?;Z^UY##IPe5%=*7C4IZzA6Ps<{{ad@?40lJ!ULrD; zhf=C%2U#-a+_W*IE3AaDZ%U0mC9bgQAr<=gMFJdX+8ey}SORLC*H1Z1T%T{dbzX(9 z6hT9*tY-P-O|Y%G>QbIO0<8nr7h6pbyAD3nlDwq>5!y#})@3@e`lG6CMtBumY+Bt< zX;gtF_j$$Q-Z`M|T;7iEo2@VxtRxqBH4H94kvQ$)*Z@Y}>t3I!Qqb||lV`JDeej@n zP)$|Xn!S!$UyP0=0a>ZwRbh|LMEJi z8c|id<_JBPKF!DPH3MF{F@=D(7Em@(`(D6T1QSV=7d7ka;EAriCdK{+7+n}>7yR6X zMl~EocU}g=nf(o8>KIQ|3t<|uCz3zBhWSPEkkv~Mk!E)L*sY(X=+I7UTbS*M`}$A*ChsCaunj7| zF{;FgwGcBspO?Uw!(w`;^3mKSe!B5qlJ7$g*Lz+an zbSvQ&BXe7>Euk+)JHDW4U4o*{XK!nk5v+m7@QK`em2iL=`oEmhCtP%r+H5uVFG6R7 z^Oe#g!N4tNfRX_|UusnVLHS;~Pc zR$Y8gC>lXx+nj6>yDJW#)czoJ+YTg4S-)>GB_Y$nlnyeR3VeT0r@xI@2WtC}(Yz&G zci!vPZ|Hxt!N$j)!1T}pY}{TDm8eq#_Lp_IXk&BXxlhos`Y$=i@$Ko2+zV z7%$#kl?q>N^ZYm$Jn)^bWkio*71o*RPaZWS#I+0Kr<$G@f~L|ZW}_ojaFZ<~p->|n zw@~jY76)I*SlmlzalHa~(sP!oY4SjIXwyq%vk5YprpfP+h=Q}eQ2f3u1N^bON0t$C zz|u$e;M3E&nDOI7rh9TbZoK?-wueZ6-22oe|B*-z^0Wq=klfRR$2awaWxrRUakoy2 zJT)SN#=)Oy^NkQyYNIsAlz`FN9ZXK=no#7q`KIp9P}HJI(ULFCg9!FZS?QGosXSGF zkL5K9KlJ7$Dl7%yQQMa;BAYF^9%ekPsqmkAAgZxsU-Fnk3)s3i8Rt3d{(hpk5TQmYxbK=A{Er; zj?=@0Bv^{Q8?%SC07!aQ^g67HQAj6y$ML3c5NA48^fku?4znlu%kR!0NX8)D+`4=$ zp=fkeeA0k#Hhj|^o$7JF`Qr)p_r>tCV@gUxFc4=A^|wS?RN>Z(ZmhmbiSR0Un5{_1 z7x!ySK$z0|`>3l^CmBJC{31{EiH)O{uaUsdn$2RH_w}%yB@T54xUkX8V%=8z5nwv-U|<4F01sOeDsb~dtY}Y z4;${^z7R$@llI;EL)}4}0W=eyVQpniFg&mjRZFcJb#S-W^*rQp`9|LESpalh&gn9oDR^%3q&xr<^qjbj z;$tALui(CBdk4;Gp5Er-kpl*55|c)OIavJiV*WJa-_U~J5GDVpL;OV{#F8;zvnPh+ z$`6LJPl1jLsCJ+vGvf6-42#I^-V!$lLEk$HrnJ_9<Wj%0YZ)(xN>Yqa6vEA<$Eq_z57+!QE2T;jGKu7}FZ 
z2_5C1<(T@FO_zRO7o1yvye__@2+gG=9!9(LLcCJ)*KIqe@YFT=%u8BrK;E*VVahm$ zk>y%0=T`<`K!YtS{OStOIBf=pptgPoj#h<6%{Cze4 zk|+_U8)l+s{Dw})&MAr(w}GDgFO0y5)u{7#df?CvQu;h!+Z1$>lH7C1a#gxx%JHdH7ZG#Mn!jXwWuv zmRGzp0B3233PRn9Tsmgs-R25^D^C6fE&0ci{Of3mor^}@+1u?x`#)JT3T~OL-viY88BoX%_M=Yz6hc5F-ki4tN%p7MS;V5_2RD_NJ;j;sQIV z^emAY5Si$2v)DX{ltUDIuGcpsYxJgh&9iw><)|Ln-kAhyz1!|>@6N;>^cCAFnud^j zzh?1rbrsQJJ0a7vgJ^jTrlFyAG4kcOk>2$#MfIyqTH8!Z&`~XT{a$$o`bIvKPu-M< zt3pyt-zTb(eJ%V(>(eq6(i;1qVfhZXk10vBD7NAjrMrzTLPVihbyME zyjH8ve=z9{hL6LK@=d>k$2+q0nlAJ~yW|k_M0_WxJ-J)V>Cp{Z&4iB z32k1%u2Wn+1RGNm>ciKE2W-C_XmQO(ao#N(6zFYQD81bcmIf5HMVl=Qm7Ypq4iqh z{0jymwdp^Yo8O=%|7~Lay07&Qv_$UCFDtSpOUyd0h_n-JSif@igZ2rca%=0Kp(4@+ z#k=}%OW)kLey9(L>yze4Tp4Z_;QIQuq77%sFYMo?_zq*~s&-l4 zX~png4^K*cCBl>@moMZK%(Ywm=7w8z)p&eACW{doNR>@J@(Ds6K)>tZ=S;d55P2M_ zU-j4v$#-^Lvij@+K%?;?zM&2GWmpWpsvpCv!K}|KGU{+v=)j@-h7&kouFT09+=3?F zMY`Ru$Nrd{-=HObJ~@AKGWk1N(j}N#+15>3Mm-)z?ns!E3{Yz&9(5gtT_wXV+hxAP zmqMpqBu;;8`~LEd-fYZI@EwH|y z!RkhK9Bk(mHn#~LcZwiLta9t_w{h4kclZy&te47EL ziz_CE-3de(?mk|Nwlw%T-rC2RFaTYI^6z~=FjKlJM8jQEXM!`Y$%sNeUHx_IZe&2mF1Ru(U z@dS>y1Bd!yX;HFP*um9faXp|G<0m9uXdL+)RQ4OR2kW0? 
z7l-4UFd#^pt9Gps3cI^J7B}Uhki^p7RHHg%v#@d3dLDz42EhSuCn`Xp&Z54SKOMKX zQRQ>h=HLRQ+ikn3cHq(PE2Q6(gsXv9rhSUKu;nOiNiu<*T@RXjPp{tqUFI*8@0GQ{ z)(g8QA}xOunC3 zj!$Y#iG4TU1NX3e=6#}DlvyqiXG<`Dr})VNzw<^qUxU)&km6b{Q3KXp(jkBBHVPN&||f-+!7x0^WEryny+h+6K4!;7AC zFYQZF?%V0KWA#K8;GUMT6H_xjTRkVidaD&Z>c0tpple5N?OBnO$w7RuvH$kg`_-`T zx~cAlZ7S?}sQ;2|Sr76q@7CoSOM|C*Z5QH2RWqqbk zJjVCFFDKQv;2Mp{N`p!V}OC)J^)f z9PUI)`!}{{uhoN4Q;gdS)p}&PV1GW&yBne(G3>qdx)$!9YEX4;tp`)#18+92mBY&7 zqXR5M{rKyMAFt=NJUBX28(nzd3D`XmrMNhl4>_KD)!#X_p~KzRk@ECSXg2j;V7R1= z(4Q+NE-pr*EVEMUEL}ZJ>oQp`>ck=QsncuZaVeM=NVIIOG+`O@gp{dw2fSW?HP1za zh?IIgtg4{n(YB_q^4R=$09)bh8$jjih(6 z2*1a{1yRAMlbuj;d8~2YbRcdhH|hu9u7o{O$qxf!qJZ=9tfEeph$1bNfTj*US}k2?gzU< z$+3$+>NzX^IHCLoE%~R{@vpbw|ALm-2A%ul;yr-;tvf#C#kHZl9^h#ukCR9;?q7XQ13hW>kttTRB|>tY)d1^ zwr((R+#znl7F|^@my(gIKBTP51|d%-aqvcQ3()OW<;dIW0&{(?e0IP3v17Nf=v~H6 ztkCR8kumCrrlU71wpriBeE*4X`(_=app@>2o2gIcbM_JSkk2wa&OAdf@a*-~z4v3_ zq21>A(}aip9bL`pSQeu1c(p09>e7qzR2yrgI%yaXug&`^djJGEE8?i8 z>_PCd(v?dzAF#M=l7aOi;lV!AcDi)Q8)6Obx?X4W!ReHR;;JV-C?lA#LSH!oMrS^U zXp5v`Zb_IcOIHbotfqW>;ZliQiA#k7&7>TcKE-eC1L%`hGwl(=)54At)#pP#-oJh+J11CNly ztBi&4Q&-m5=|v*EJK9@1x+M?EkN9*^^Jb&f*&o-=Lki)q2g8jkE`gUFh>+ zr(Vu4|IB=-cB*lo4MsdMth8OUI*$Oh&Ef?15Xn&*)G<}zWtePuD{}7W6TE+Ht(Ti{ zbW)k0*&fv2ig{GucnrKYfb7#Ag4YT^1z}Nqi-a3C94vx^wEq#{+2@6pB7|s zq^=fYr;aadBJo0ZBbw8nS<}$3(~O~SI2V8I$a%8kat1IA6#GzzW#hrCx~ne~5|PvN z{p&YjK3KC5a+xVJo(PnFxMkr^CS)2a4|>t0W6rOpYn!^_&_>erv}ar_o-7%%4)lva zDt0znm)m6^At!ErqNo+`aGH?bTS^Dt*}|1!>IQuA<%lY8O%~o0IeOZzybv_MEZjS#f%#_GnnW89xf~T~r zeU{$f)n%yHnqG`iTze>leG0*jqJfh-!wJ%>j3b6stzo6U(a}(;9QVIkxO$l(2P)F& z_~so{adXq%_7A(Vkvd%CF%@+Pd{0&W)aX-$KLdnqN)LsAImy|oiqt6RF6AxV-H-^u z^?jjb6bWEe@*#((!UK{HnkZuefeP8!**nB$LbkuVU+%3;n4xIh(RwQnR&pPI-gev* z;nuk3VCSAa!|N6v>eT?`7^KX$pJo@K_2V7$yj#H=fFT;G)$%r+^9(^gx=KM$}IXs zf|#Mt67#QetogP)F)7!ETf_&S^u2z7%GN)}m85b(>u!SryFm$D9e#1$&D;~#7q^c! 
z7L>tK$K$z+2Ws%9uY|N3TP8d^{`7LR2*yl$_*OduQ`%|_Ltoc>qv$m zqy1q`@3U#(p=y!UHjxBus_)X?R9NHYyP6i_G9}<6l8vH-_8|7?vggwNNFX^8o%ifn zI&6Qb$x`fGg$!!S_vL83F^KmTZ(T$px*lkDW?+s3j)e4XLxpSzh>=t4iq63+RzjaG z%?*&!+<))ID{o~3-SFB&dBD_W`AQj8oAA3WU1XhZaq;cj8gCSp=1=`P#*fiL;1 zfV06w5WK$2`%BFE$h9S#ZHsmxe%CMgW);{BUaioyjXxgDPWG)h60$sl8(+Fq4ioS5 zyUPtv&)VV>j{U;P(=mAW_QZCNN9MTY*bA@kG70GP_{{vv`cyo%O5VUtSqTSLE>!CO z(t%D9r=@hOMobvd*fQ;v1Bnc37i%kWAVPz)V6MFZgW?ZYe%=)a?t_+IwS?Kww>*yl(9#r`EKea%N`w#7h&$C$wCR!Ns_*%$30n@!h09e`7dQzA75^=8i>e{?ip_j|D-f z1V@sJVLN7a?RGx+q#o^6BE5g6TVjV2ji7T!5uT$AEP76D0Ba1gv~u#{!28lNZ2yTo zV2MwzU7X2*bH~k%JP*{v-t%XZr)YyA*Io7(yJRsa3v=zC?9arT`P6*M-Q~E`vz(o# zzX}bvZ{8N6N4PWpjGvCHD~A~(J)rtVI5?6;R2}8W!GsX&`&rJ_IA*fDY4Ug?R_^~I z-ux*QJS%y(bFNf^iuOvTaBBuo|H`e9rHDrfp@LmEjn$#^22Dqq+C>bSJjs|%#Lsxp z6@g>l3)t)xRyayk2dR0MogwCgE&an#@?r`iUdw5<-Q6S#RKCW9Rh*86H}V&AO(tXE zCzav{Lnj|dRKC#PNF9OZZ?1@Vea!_)sTZ|cnOPv0&d{oHFag7BCe#Fo7^lTk*A(+x z3ETKWE~9+|NuYd|F7$0(II2zCU7${Ug0DIQv@{K>K|O%hL7sS@FN~WoGE5g@$6Nct zEq3*Ak0dKK>1{dQifudeZeKRxvQC*8csChL4BzwLTu%dKkiKEsS_#I_(`i5Ne+|=( z$|L#Ob@1WzNbIYjTr^(}v~T&DiLI4#onIfu?TYHRhJx@jEVq!o##wumlO^& zjO+19!Byy5Asi^xk^pH|KLoC=W&_KaP8R#`l}K^$nTC}dfvlK)845a|f*Col?sfWI z#>FkSc9Q!O?|~|5qwmK;RJnZ^8!ZVz7Ip3uPRL-4GO zawAs6)HYbx)xy)yMSe%_SE0zNTh-4WUU)Q&I=Y#<5N59DN*0dgqH0*~_@llEOr5QL zT<4SsWvx$h?K3mM=>=^`EsT}uHj@hVlcUAJEVjb|El-zfFb2x?wtkD{|Vij-|2{W9Dq&&IZ}b!^Y%C zQ=3{nvT@v-PH<*mm;6sBFVyE_o&XlG|2+n z8;X_%JP4m_5BV6bsKfIkThB<|G(#iTmaGXPP5nca%IfOTb~KDg<|!S@fU}N93OhIp zP%gAfj+HnUkgaj}FmpTs=6F9t;j5{**HqR|=UE<%Mv_!84`w6P2wVLg%@VBJy=PO@ zu}XOIVum%TJsT~)I3%FVo#G?b*pLN=y#U&`?Q90psIUepr(VPth z%hDYKC$qs`d}TWBHW8E8sPRp0YbTnel4QmD5@tWT;hI_Z({S+U=TVsxoKvlh{<_xw+nJ z``CyC(v$f+-L&&yO@miVm^>HmEH&ZEd!I} z_l+9gOELB+RZ(V7J`Pvv>Oc2Jq*YD%yin!=sh7*kwqFiG=_(t~UcGqu68fNHRyh$* zWzYqXkB1?tp`ZQjkTxQ3UD5FMS`B`gTUGb4O@{J?(?1!B{gI#Kx(S6)6&C%te4EiW z7L*>+y1E@m1y!5tfja|4__3xPmI(Re$K+SRr|(Z;#VOA8x*(3I~lm7LzXjbd)_W zT)F;}R)8TAuB9zEjuPF3CEMwboT7S| zalh&S>A`eZ%9>#)J>G`ofwf5jPC4*6P5RjVnliZm;nKO_N7g7&yJz;yuR_Q@ugG!d 
zb^yvqUHG>0AMrpm9fsP3X_Hz&iP_u3%`o%1bCbAjIALlr|6tzN8-}$;O$XLOSvtayYcg1eeD)5*Kuy1^uf!QV^7W@PPO7~{=RJn~O8Whkvv{dHcB6BfW zfc!^2e$nrL@7hnw3f{jHjCv}gb$m8D6^sGdSxr}dCW4l$kzko zH>sUk+q2-|4Qibkz8Hw^BR@W!K@>+n*+d9vRieyP+?BJ*mEbzTZJ|M!#C5B+D3wv` zBZuI+nZHjvmUEbqeA%0c)LXYoP(QJUhN=PI^rmzq=@?|bxkTLi_9bOfy9PtYxO|-i zMFEy%OE-##m_pzy9ojs_GH7`%`DwvD1L-UFw=L-hqrk|%58{DIC_tZfYT!fzPhlmy~+k8m}_Fu|7(lv`9g+=n$!wtfNA+gkJDYO<`&V1$VBjSv{ z*j@h;XxofDQ&DbHg!z$#2usX`=^Qj2Rhcg1$;X4Lf_@v7nkbkPSor=0A#W75>3XkN z3&$Oo4yJrhg9N3HipY!t%vtxR`7vCA{Bwr%!$#T2%4NKI+A9{6&5q@5C*pIgghcN* zzc)cfwv%+%WUFyGcb`?BWjbE{xc+mK^h4Zgdc!){GXt-(2ji9HROqf#l9pY|#Le}M z!6|$}m|#WR<-S&euDRl;_U$?FqYIRrSsdU$yIsFQOa4)Z_=`fwfqS+s90ot|b;DcL zgZswd$~9SHg6FWui;bCsYz&v}zSjMueTTF4zm^M`Yq5vRwM0*^A1j+!9KOH&TQ#(2_x_^OKZ;9R%n$`6$e(4sP9fv+3^RdSqr`V8|H^$Fige9>c?3u;rP72kD;o z_^9BauHry9B;S{MrFfG_0Cz-dYL#A)q*qC2ZWsj>?_aXX)8(*V)kN|bbvGQ$&-LK% z%R=#@f)(qA_fRb@L6Ll~1^La5?rQoCB6sm#`?gjG5kkW=zmy^zeG!9 zk8jiEZW#l=hwG2TmME|2C&vHuvHONy%O+=c5w-fNwmU*^gh&4v=r?91{C|#?{0ULA zp~@#+{?!xT$R^CM)wM$KSsKfxkG^m;qIsY|ssl#bj&I>jEI&KBhcUrXIHg~UW{XFl{(3j(g_MUVINWY4aCIj$Mo^`MMcpV%pE{<1cD}&(O z+u~vb3ZeAb7qR2cM2@ZF6_MPT4A9g0(G$)efWn1B6RgJlz`WbGQ!vyEjkj=|jtucZ zHqLXmi?u3%^tRB&uYEyqA!CxEp}hz70-ef-!v?Tw_CsA#RT;{E)wx;EHUOv28?ud_ z>_@UAcjk}i4T7qyCS<>=z?RY586Sg*+&9s%@cq_3pl+|f{{CDo__x-zt9zUOu@b)_ zO8&7D|2k13E-XA{WK)Ce*Jk4=rke2bkc)yc@joZbC2_=uhvU%+J`MM_TFf8H2q;HJ}?A1~TkDYeW6{>_Np{ftM zgcIzQeSxNjHT$q#Q(|OLv>ba6c^;xDA|#U4)Qn9#`;q1)yXr=ECkC^eg0NFPME11D z9TCzR2(-*`+2R@kYd81o-21*11K%`$-nYL2^;mYPb0#2eriyW(6==lZ54NJ&kxg*T z?ws6^ zbpDn;^&6su|9?f4JWJf0|8lYrb*5t-OAf!sbIyWBbm#9N%Wy(tV#GU8F>RI;T5o`y zMs9Ap^>)a-IcVVjwHlAH^YJqAmVm~*;?BvZ4NyYMk|Q%Q1xqK5Z9aQP!i>u4=-9Y+ zEIL?UXY*+o7r(vGia+)ixCJMB?U;v=*X3!q^PNVdwJ}qQ-O`B~l4l<}tql@=!e>mR zS_3Fnaxua}kob66FAbF@62e!3_F1{l0dW49NT0)3!eqa)UO95P3_UoEcl~f^#+qHN z{M@uN*k&t2Re8GxRhBZ}b8~lK^|4*wRF1c!@549u#8%w?*h+pwl>BY>{(Yk4o^{b) zF7|Awy6}!S-%kYAa<|SgK5hg4)|ZmzgG7H@*=-{JMHj@0U)|p<*84V477r%ceWTAo&;smyG*TRj|@CKbuAM6}u^RB%eg?>tV#{2J;;P8o#@?E@b 
zFk?t-m^(NFXPCz~sGjA(8uP>Ko8HA(z3;GOGyCU1Uh&@$CI4+={<@v@52A$KMRMvP zK}lWNc3wUyV+uMY=+#RF#^I*iz_+WqW$erz#GV4j))@ChnAQ+oI&x3*XXWsn zyn%EuhN$ z`^%m9KrXd#?Pvo8daZ^(2&l&gepx2h`7&|%Jel}qLJ*_&pb5D`-BG;X29s=wz$&#* z;|UVe$iY=mDE6WPWI5F7RS9#F=pB0Z3Ww`(dC#@ew<>yJbc)_K>nRb+dppbfz}srV z8gzeGo3 z+<>>Y^Ay?9ApCy3nAE$Z7cK#%O=epghF>>0qGM8to^LXI`3U!}ec$X)dOz%j4S8AZ z%MCp!boNm7;Rii)|K^m);ro0a_6)%cf*l0nVFgQU?_pzrK#7QHnC zuGE7{t-g4Y%_e zZICCoNhXOs3l#ctIl^-4z_Nq<)|FRr*ufShqI?{ocbBt_%)U|-l~>=ex-p0eF8m*E zh}K|0-I<9)l8-@jNbz7zTm)zwSO{QtAfm2(79y#mgJGuwxB8858E8dcY2v9)6z%64 zG7O0(n1WvU8S9-jm~+97dEK4J(0#7{Y-G6>Jn~C+9_^|invEq;%ijbv6YZ`A*9w3# zq}rd{s|3BHc^qQ$>Y-HRg87kLLg$@#hlZyl2?E5fa?Xm?qv7$i`qE8ZD7EI$Lqp~Z zn{q==Ul#9%#61h0j+vEs#I-&cxAuatvKG@$_I&J$(_|Z8AWYLv*WHpLijLW@p4~Vv znT@&;j2{ervN6)|iEe*ICxo~UzLXni07*aEhD4n%?A32HTs@SHnKb)%j9TQweabP8 z9czT1E2sYm$@4}eQUW5ykM}`lvC}q|8-s9q?ikEZyoL1(Z|NyG+A;a8N_P%VBv@Ce zrJniJjuGAD3|uRH;C|_&zvk&ycw3xv>BP-u5ZkBZCGemU-tdnJn92@-p_gC^^FS7) zNk?R4ZfV9+jp(L%@iNGfc7K^*oQ$9OkNZg^j^MM3ub0F<@=>K<^y;bnG*I-uV0v$o zXj*sn$0owbx7E7cw_4ysP7Kqr z!d$$jv2{zd9HCqNan+^Qa~Kq@>a0cm(t%#NLe}|h5nA2uXtPm{1pQ<^7r~pvYdRvZ z;nvpydE$p>=v2C4&h3WD1LD&CbWWY>aV%l6@xw4DI44md>mK=yM4(@2?t&Rnysvq#;z1wL3O%wBm!2}0!E?%5twpjS z?CAi}+EOPdkZ|C?L6AR|NxLj^d#f=wcl)-bg=qT436L??kdth3BnmZ^A;ThE&QoH1=f z4=IWsOmc)|Ik%pxc@)YhN+FcqyaA8NyMU(VdoV@;QchIhKEZ@D3tO8wNF8qQe zIj9;tM0)Zb4o&}Y%KS|s<=nqXlvJpF|B;p00&>Q8Z(bzC&a6_~uQLm@p(5>_FNOQ# zp>ugd>QHYeJfp4YFA;kKb{CAJOuPwmjX7akHI{tz;!U2YU}?Z@VW&FEjnBX~iZ5^0 z?0q41?GdX`S{Z8Z+0gcn&cZMIquFeAn$h`k(3T_DW00SxgJ#Gg4R~6T;}j^pG2dZh z)N!U6z0&RD0;Y2DWF((X?Nd8cIN6!8|AH#ctO^TK@Wo-^Gwr9R#~N|hQwjYb($-*-;M!>gSF_bf*1K=hk%hGaz*mX$uCwRjs2 zx#cS!fvlDAzUZcY9eFHn5ux4JR+0naKMJCLvgBf+hkrt?Q#tZU_LQebyTYxhA=#aW z3Fk~LA2klfV&JE)+#kr5jV?`Go6cVI1}TA8eA?P(fW$Z`{wn=-xX(00_)aSp} zA`Vod+}XBXwOl!bySWPTOoZ%#>4RLCM=&U!a6F^+xD<6&GaBj0YcS$mOJ$S11M&ok zBp>N$!Gs+bS08-|2Trv|Zmf%WC>3hna_3bs9F6*#vvxQGipu(5ly>IA$GXRYFJgV6 z?MGn#gM0R9*mXFJ;Z+H~AUQ97jl39ihg}mc`DTJr_N%JN;0ok^TWMmdmjyPaa<=Mk 
z8{sKgR?}C)?w!}&Q{ps#IeHJ1Ggnsz;Mxlj?z*2EMDTqbi`L_Gw7RABGUn?`z?c~4 z$xb52Sn}e<=sjNWP9e80K|UXC=QgMol2SqD-hPMnE%iV(ctNqW#UIL_k*P)!6;4ix z@nPmeWgu8CZpbE@j74UyW-pH9fyUb)i3i6e@ECtCd738TWS-e&22!F!ljV2OwK)ww zgGiBAXCkJ@e-gQKB@y9NET6M#D5$B+P^+A)#A_S=lOtye!Le7*ecUqv>wLs_e_twx ziksq>uWu5Cb7ZkUbdMFnAaAlE2O(Y^jM`ji=n{&Krzp!0-$=%ZP^OFetiHHOguQ5i z5Y1OaiHu&%4#rQ1Ikn%9cp^(hZh!WHKoC4=DOFl!ja7ZKI;x?*(7+U+!dw&v1_`nq zH_|J?>G%MN9ajnjCMNbMduM@(?&ymDG7;O5JiKVo9D`sXAS$*E@we&ua!GpStTse zMD9D8o{uR4BnQsNl!5n&{&o}kO1vbps_ecc5Eg_qSyFCQqFLC9smZG6@WbY!r{UdD zjJrpEC+}SWyqIvgx`!~2VVY~d^KDBRM(M~4DTqX&eEFUh7SB{L(4C%jI2!>T!6t)z zbDl7AyhONUR}V)&EvQr9mTgANQnI%uY-m;)fXz;L`Z6`6OZ z4aMijz^gR%Xj{E>Xk`xAUa}gGM+8$YwjXeTv8qFAf(o86o!EQkYgGY$3bJ6eC#nId z}NYX-i$>cclD*#YA>x@*{;TOrldfzl&y-s087!v{!obCKEE(JCaS z20MkO=NnVYF?J8xY!@_x;-+&2c|s`|WixWwqc0uaQ%OaKD8~_7>3()@uPnlMjIyIz zEC@Fbm@N6!Rzo8nC&RXmOf>2H5^U#~4y&Y5+hRUGgNXu9b@O>AK%+>v=QmRE$uA&f z+#2*Q9u_{w2I=b+!->$h+}6>RaQjtqXnklDe5N3e)9fN__<5&nj#2tSWR;)Euw53+ zIm$c}FbV|LC(1u}t4AV!kYsJ?|_$$OiLYuDR^oggMVUTf?`PYtgLOB)3Y)1Hwl?m3<}Fmx!IRRF(0G z=?Di~`ghVyL$yF%)I5BasvNYm+&vqv#-OmOXwArOhkeFZL8WR<$)MieCkL0Ya(Xj+}t?K@2XpqF8eDV*G{t$h6k)<^U2PBXuP z?Iz8a8^;sz#M{;RVW}dZ*u4*L*15n$Y4zd6b`$*W&wmMxvw=yfx8etH4wPCB$#upx zL8iIMmLCZ*AS_$|NcyAz^oI0}ND8Omr_b6aSGEx3&UvNcpu#eEHDSC>#Bz?(l29GdC6!&GI&1smmiQANO%dIEX8TQI;uF8796?M8{f~Fi}B8#mL1WVfQoMK zLms8T z%=(#KCg2UbEn6b$q2A?A><5lI7^^*3?$x0No=V4?6{13Mj!uIyM5hYNU;C)-xSfe+ zTyGh{%?8*+Mtb~DWx;|sX_TKN!hXR@bc7=(f%knCOutCMVO%6U5#^QI6)8v?1NN;mioA*hpKz? 
z&F3iY*KQfskpK;hL2p!*TcL5gpo8=GdVKX#mhCi|0PafpR-7c;tcGEBK+A#!O)Dt59^gB1I|5@p zpGVMIXW(F7C^_jH1kwj}+!2K7!M@DbPxZyT;Aev_^?9a3IOeNT_rSLd^@S~XgAe6n zMuLCn*gw_^~-*+d>|6~Q;r}W-LOIC%??$%_#WDG+RU9Py$#5X9qXV0O@ zDmOTFJvc>rs1b*+>7SXmu7MpjOC8LFSquT(etvK+6TBGpX->J85*=T??6K8A91vsC zvTDhOzElzkXX+X}$u6k-tuPTbEt>mv`P#yr2X=SmsvB@bFs@bOZY;7sSUfCh;Dkz0 zvRt&k5CthWxDARk(fjGzUMlBY2#G@9fKDT$y7}#Q7qJ$M%hsl_Ofn zrBnMlRfE%`TcKgKjYwlAAkh9e8C`C4x3?5!!6@?Xc}u|JA|X%x>4=!2RoGRgp2Ww^6XzLmseOBI#Al!5Lzv9E6YxgR6TW1wl$n35LfJj)pM&VbVMi7-<9Ve> zn9og`YIF;SxV7H-p`<);dLN`L*Iy1l7tVd9;z#Nf;{2Rg)8V0xXqKrux?T_Zo`&VIsujR$%5i1;4?d^o zK64pxv~S4=#@U}cntq1iL5KFsd9F$DtgOU^Vk{lE#!gm!GJ1oTB)%B?IJCpun&x_~ zjxH<=b}5qIEr;Z|ukkcJH9*$?-GQ)%#hMRNPUF{Nf!#8r?>yyOn5Ej}dTw(D+%XuS z${G!ZZE}8e{#`j>CV2g8>_{f0k84%(RA!;{kVY`cLIRxBnti@Q(F@-zEd_R~t6-U( z|DtSYAxucNnQRauh&v~HWp)W=!Z}%AF^?rjB%jadR3qYDvIS%}fN-p>Z;t3JP zPDRjL#=k!HE*sb-zKiX(j|MSe*7^D2JpA@}PFdKs4E9Vo>-IYOLAD`}@2kzV zp!xjTQ_i_abT{RC)KZd*olGOo?>)~Y2*Iu*ll45z?3*DeDX+#k_t^#ER~3*gCM!0qbmtfhi^IVepgaSb!J3+o61I^i)(zmFGp+)`dC-2>< zaF-|g2;T=s9MeeY(s@=1sc(0*?KCPysg*p%a??~uwY4UZkS>Jfao)%0s(f)LgLfO1 zgey8zez?JNKN~{0u4V=4=7Mlskg!6cH>4e5+J7md9`!Cg6>Z~+heUy&hxC$4&_w+` zd*b)1KT0CMVHo~Vhxm&^2%EZTYD>*Gu#mVgNjejTvzKFQNdlKakBsdLx!PxpX}jt4 zvStn$1A63DbH8Huve3bX0vGUS+PnN};cvDX|3e| zw%PW#>HT;=GP%UwzlksvIn7bXkO>2JG=wy90SGo-u*GqZ-|m}|29!lplmF3h3Fc5F^mb?t=kOSHyw=KMH+~z>{I){ zH#Wet%kCZw_I_9u+Nk8F)&%?xpBv8+?rDi%x6~vSyo1G$o8{}v+fjLaIDy=ja8n$3 z#E~u;jiPkwA?0heK(?<=Iw`Rek7w1Fidk2{Y|}H%Z@y8uWK*qWHTni(RqG;4jdJiZ z-R1TJX&E>Z6vd`;G8G-a_`m$VISOSypEGN_P!G3?$eiULPvaPCdAZG1B2RVG{O!^Y zg~;HU)c;DYhOp|kd?{p95BzZ($F3$cVDb4V$}+7obV^R3)axAv`V{AGouc>PDr|Sf zpk@HX?wuAtHeL-nbzLJz-whzCi_M88+Ah2%dPcgwIv&HGWQrPYWMIMJ_~+Fo-Ej1C zq75TQ8#X_^KeL{c4eFEri(`jV{#c3M5GDUuiGQ6ad2bJ?6$B`??b;8^)PWwPQ#jSJ zn~)ux7xucK`f3KL^F`7#V~Zgz&QE{WF(TXeb?w3F(qzK<(!1YwUpmCcg|SOxDa;Z{ zaq6dffmM#$jJxbDge*NMJ7C-m%Clc6xwa+a?D;9#69rGOtleMB$gc?VS5BVaw6_km zgrXxf2L~}F=KYg*g_S^}#zj{3pp}q?)IDA-%qDPWqeUI24tP)&f06(r;mMr}ulSo; 
zM0T9Sd#l@7NEx}fMO3g2^z#be^c6>7#HtzSx;p5&u1mn-+x(WVr#tWrc|P@5 z$c6C(wiFlqN3gN*#H6ukHK-gr8Su?>41>Z7O81raz&-PC>ngd+zhoY4+;C6b1U+r-ofvk+M*=FJ~Air0vZu6$YU_ zfz+qxZa0za>b^z&W*t=W413hkmw>u0kev(dN5_lHOz|vvNOygvbcBF3c~nC;9c#-+ zgBu~QYyzv%>Gmy~_Q*)M!^nBHdTtCBj@;KDkPU*&UKy=TN-=OR|3|^QgM+XUAZ5Wx zh?-X11*6{F%Yf#Pm26+LN<6t=t2<=51L8%(4}_4#z?bOoUP|d!+;(Sym9Elb>| zBY>{(YikS+*2iKaYWJ&(N)|Gd&P=^()^lxpv}fuD}*rQBO$JMbxZKJBEDwo~ z^Y6vN&$kZLvnRveId17@y>u*DX0DJQ9|uwoVdb^$WANiq`L#50U)Zk3f0~Z55?i~! zhcn)+g5&d~xmSa2P(;s#hVyO>lH1d6-I_quq*pWibQuQWRm4vq;Zh!TyOf&u=O6r>{E`zFYE0y{4ilyCYEy}AAwQnI6+aDmGD^$%VL za2}Op*~Zrkw|rJVY;|hDz2qC-7o;oj-qO)dlHf7o;yF3?Q>Gi_PdG@+1SUY{kSSOfgd^+!d4@Re_}=8qCUzaWmMtiBkj zu7vUYtgOH*RoVl6*V<6zk$ufO&3Y*LNV|=GS2ZZeZhIVYstbEg-7$|Ph>hJ{uiO3~ zZ|@z?_22h@%Sg&-7bQ|jBGItQ(-uOaB9)z_D58=iL`HV@mc7@<_O$ojduBGQB9X55 zd3?{~cb?~Ub^MOw_+8iS`pbVlyvFDCdOe@_$KxTr)f*SAFK^#`Xb9>_KdI`j6V=I{ z)|D|QfA~QTWt;A_f#89A`tw>H@OfLttMdkF=(L9-IF~>pYYRS$S1pvmJ>O?)re;p~ z?(IE6*0nkm$=W@^x32@jZXG6Hc{+v5i`>1gOzm(y^-9W0;V|UW@Z}EwN(QPUvFp+e zu~<>|K;SOnk+OHwB$r!RD`rQ_7u-Hm2xsCgFtw^4!Wt?RQrE=x_?L+i^_g=u_hqf!DTD1EeN|;F)u^>0VM1Hp4G(zzz6uR@VQi3$N}paaSf`2YINwl*KS$m? 
zNGFDsYw^^T+uKv&!20JBZ_gR9-`3bUzMGKgjelVcC1l{;ArDOLiE~zWI;}E5xEJj$ zUQF306@Uas;?)p_O8lxPE|J92jt3hw>ewH5;z2j^wU+We81V7U<;&~@hiBnA9GAP$ zsg&GdprZg;2DA2!D#xNZx5>(n6EPrnK}cSwry0E;y2kIiQ4cmvAAhBE6~gEbI%6Ve zhX@G`K2fz20qf%PX_th9;4br9p$|3>;BiZDYBps7syR1ZYWr>vDVntW<>Ew|($N~H zv!apkUQ#CTSWg%BD6WY(xNhWyFi?dJbsczPi4w(n|E11sHNh5m!Gn z+!>SY!m|x-hXe|0;ZBp2_Ic^|OE@+1FD~-5zEd#7h z4D$+Ktw%%B>ZNbztDyae1K<5$p;xo`^C6gU?=l#kZZ38 zb)}1TE3eB5@>fzO&W&(-U6YzYX z06`tulN}u8_@X-7p;@jFSbJPf1$H(AZ$rx-wcVBIr)x6#eXR@+ERJ*TAozzxC00w( z6Yijq74~>}knnKX-*!#?MGO|UkIjdQ4Wg^eK;e@U?SGv$e;`W!+jjitNAUk3O74T9mH4*J;IKqsKD}C)q62 z*~vlvnY|IW=GMKre>X-I#sAW`Z$j>uWDAzB zh@F3p$*3aA3q%7kKXK?07#anKN;g)auF`pJX}ymyf3J`E6VyWKBjdp5lyy+;a6cj3 zX#&ZAtf^U*5bcyFq2C+@@?l?jv8-54G@crnEEu=wz&F`iu8)e^;dSoY7oRgnaNo7n zdymuGpoKN1e9WyFCpBiT{TLgqlISYwm+FTM2S&kOq6kgOS;}RP5(2f)Ou&W~we!id z%5G_plxWmFESv)d?`y6m%ax*@>MCp& z=zXCKt59Hg(85}>8$(sB)8!4zK*Y~9M(9C13Nrn2`?k~%xg%`)N0f*pp#N~f`$Hi` z@ZTg#rX?vjt-UjWEWVO|M}R+m{&3m+S6Lcly3?#aP_~6*c`L7GyuDGzu>Gyfcqn9N zjQLZu*+Kg=BZ-~c++aM;#WzPF8>7|RnaGKlfi1f7!Da%fcq22xwIsL*_44QB#C`Ii zP@RYRSZ@w;?NMge=qQD4oBLixp7TecbVaTLmRL}uD>K_)U=N1E^rbj1s6gfBp%q#}Tk5pTp)u5*k4K?3*9>wYcdkJIgfr0cnnj@U z+ge=P>LBR*)eSSZp8aW0kPh2VIaZSDbHMn}-Oz_u5igr`D1O@Z2;bWGJPxl70p>Gl z%p{IHXwqbTbjdyeKV_N?r<;0!r-qs3i5=c};&Peca8nI_@0-z=%gn;ZA02)t5EaG; zl=QCBeMGFv>4EYdfg(JooG~Ro+W@MP=A8piQ-S7Lfa0)G3=|!|yt^o<0xAwZxn3;R zjv1P39xD7zASf=k@rgeWxD};7>Grr{09o9D5<+Wt`GgsDFINSAZ)y4nr_J$No85B* z?nWFtujyR9nui-o!f}$0k1*h)NHq1E1Yn^lUEaZ*11baL3Qz9)z_B?l?(JJL!14C{ zneu{aY#x}bNv~9cJ&Q|K52f=Ve^+GbOmHT0FPOOR!df^dYnD?FX_!>Fe^m39HU9YM zF2o>Lj_huV6cK8TaKmD(r9Z|BjyzeNY|E92Q_bXOwj8O)6gj#8+FiNG(9E8Y8ukR<+jdEY3ZwzI`t!Z7E7MR|X8RY# zM~>(*Dn8H{UJW9&gJP+VYH;_#1fN^d&6vwa#rxcysOfngjg@6g1~IFW`+Wp%>EZN# z@uRODvguwbHVrg`OEg;Kg_m>j=+@Ft=@Z1^)qdbvGi3+%1S+!5KJo*G__-HnBQxQ} zCGDPo>lH8>sALcY;AgUU zyibHuh!^F-RT{tJ%+&~DtXE_*x(aZ}J5Di)$sJqlPY9Iiroz-1Ux_S%^JbBA&Ndwm zgEPCF*b0Ygkl)7E^Ud`zc%P!^|4=CvPdd;GKl|>5O&{#lTUaBAPU5((Pgel+lz)|Y 
zHT@25i`h3vUTec<*Wb65J3_JE_v)7uUtD1Km!s+uli85tcjvnm-yLM25w@kfR*#a* zA*bcNiTEnMue^$hQTU^nKP7MivFy#n%0aVmP@$|h^W`tVn#(O*nyitKbCM)%MtHx- z&R!4n)Im7DO4iK!4537%=wqdB6pB{&g`N_s0ULApolGAAq?&`??4goyo!@F%1JQ*`Im1xdBgqGSEF<769+4t}(ju6P+mw)(<@L0l@J#eDinVD$M(x ze_4powmQnVz8J433ZJn)K5ra9zylL5dFjw9JgfVGYO_~4tiI>Cp!~BGl;2<8dNlbh zxOlBQIXPrOQ?*S;1T=x_*{oMzCf{Rat?1oIxe8R7%E?p{r1V*a+J5&Iblbe3nr`|4K~H(2S==w5RI&GvULU+BCIoz=6EvP&`l{GT2@{(+nD$63^z3 zmBCHMDUn~HM2tjwa!aBBk&&6T<5c5^N_^<)=wG>&@YYIvU~Yf43>vaFnMhk!qgCv| zdco9s&?n9B*UoD}X3bdIiU3b|!>yyFNnFYcIdWq*Ek@$iQ@rnWIehT0>p`iM-ZI=9 z9O5ywkO@@>C+hdArXz)~1DB#>8Sd{XFW~(}!gu`D{wvm17(!o@WGbJCHzIYy9Gx0K z-ZLo9h&>x4Hy2Wb7Py0Dffq|mFhQa!SL|e?LEIs8ic9lJD-6qWsEAGz@m8aHUplE< zfO~tgL3e@|e&Jz=E-`rr{&tmF-c?zk-X3+)halnRm@cdrm}aACs@JvD`dCcZLhG~E z9EoyIDVjx6e6d(ncwvGd;tCD+9B$>WhOPsPJMIzp2n}YveWoWXaWN{je50%aLS8aU z&+jXMddu@R8Jnu{TCDCEqiQtfIow`l_>c>F`yae=O(8r^46Gu*>?8;+y9WE8NxE9qIbm7v2Q0{}#Pc z1CKrR1@pD5af|G2wMR|m*mHa5Ue2T8kSaI5)yjj&&ZOl?h%Sl(i;hr6-YfY4oxfj8 zUrC3g43<;URQ4cX+0p-Gt{NUC2RsWY&BA2sxlNbGUjb##<8rU=NbFIcjoM;$KAA1zDW6=`gyjoHM)?PKINY!I;(Q!HLQYBFuPH3X}HuE z5AO^NwKp$<`Mq`G4-RF4T?#kHm(C2Rpo~k_|51cNtm)xT{6jJ0T&>ugX*~Gye3c2l z6oZP4#%Iib+yOB+ogW{)nAg8Kr7p*Z9vMwIr_8jg*=j>gn6QH`FlPLkMr)QJCF-^ zW;&TR&SztX5>KJt!D`4VN7#d0wGiTrFXqiCB~lqS|f0+1^SF0Ozd~9 zN7m!UeMb&eVA7Q0vzUG9(9~pH$T=Q~wF3v->^^!!H&)cxZ%Kw;nYXvSsd7-7;o4*n zp$~j%-F|tjAPppQMgxRzk}!c=Z}P;+1b8i^nPE?mjPEtJ*&g`ihza$Fy8>M*(TwgV z)2LqshVaurfA!244sz&kX4SNVFN*8x-_D1k)N+Ls$;1j;t*TxP(>cTQF?YQVwG=3d z-}aq;vIOV_@2g%iOTrq7af3sk0BmjgEYUb!;e+Xj42c%if)9(W_%80rty zqkZvqksS?5z(jsMx;fSyOAk1|dnca>hquufbE}s^8}E+}ZVqQmUgMYID=t zSFOmHdRIvF!$aU7CT}GXPpFks@h`X+N}*lx@;bW)5x?j*=Hhvt@G437zTW-o6|%`s zd^r6q8b-vIbF)ISAV1P)?et<2RQ>wels|5OA8pA)6*kOZRxrZJUo{`vVzk8Gm&Jk< zzvKBM77>_W+md~z{0Y2u4>3IPIu+BOW)(e%CFiq z5EFTDi0xPswr9!=T`^3@qWbHvLY=DM!1l|Wa|crKvrM*c*JK2qYVOZ(X-kLGme)7# zmKLIe&as`b90^1`$_`G-hE`FaAI=+EKEFyUAN45cjl3ip;^YWw>@`%lDEPxPK5*gMEHXEWxu(_ps;&4S94xI9IGi9Hn>m$PrAl}`b*2;*Y5mBFkJ;(9pvTur6XWp 
zqv(y0_6pdV^VQGgMJc!)?2X*a5sx!2rxuR&SAk?Vi&65qa>4>)d9xZHK{P1DvV73; z0-xqKfs=dc;O5}eqUHQ6_#*$-;@h2gJmowf5M5b=6lZ0#=-yT0x^(91r$|ENa+;#| zzHBL0o(Q#jAoLn|>Pl_W%UZzj%C+6>sf`%EoUyI0DHW2}<~iBEAf%YxXtf&&gx3+% zg*|&(aC?_QP3JXd2+?+lGdWxW?Y=dmT+(4EnmD%FI$Dn(?e}H$ixk7A`)?)wRbrG*gg`<9@Bw(J<_(W16nHUj{Qm67~G$gx|-JqrTgm{BfSO zDxEVv4ReE6TE{Y85X{2ezBP$_81wFu%l9D+izf~RMOIcozPHHN>Vq|K|MU1jWugaS zp!Y2y+9tRlVP@rUvKXQnf%nbF95^C9Cn9^W6jXQGR_+gKL|qT+1M@`uSbD^zv@1FB z_`a0yNsG28tZz0lPZDs#ZzESHw-GXb@qO|4<%8o<|6bnXJ+x(zzL6;6NqACzbfFv@ zA@;|uV4Du^C0A6uxp*p?*tenJXqCqu1M`+0e$7|PFlBF$Q2V56tfu_5sy-dcFme9(!zk za%uz)UbH_MJk*Kfo`*$TgTDRs2mEgeA^#FlLMOUh&|KJ$dg5nA=-Vr?+^UJGT&Eh? zjRm>MavG8GyFygO5TRp{NIPJ0u>y{_G<`Dn?g5Xjb%8_$5;s-ImAam(gY-z_&){Oe;`T(|81h=;lj4fmi65bBmFV@oNF#PZx0Y@Tx>(TlPamkMy)7Eaz8;m zR|ye@hJp3cLCCzb`AGf2cHAp*Sy-c|9K&JiC}UF#OrD=QtH~AvDizwgLEecNee~{F zss$m{DSv=d@>ZHyGc2*rG#&Q?Owu;R%q8c$5ESKhLhX&b0{>08q79>B(> zd%Y@*g>W}KrCPVR66eNW?Xe4PgGWzfMfUtiM=dGNcL!H{&|XmPsm-1&!i*~KVBx2J z;6BF`8eJKK(%vrz45T}-!DlE!d?gR|7PKunY$XuSeeRoHZXLzu5tfyiULqIlRl{Pg zWwA4DMi3sDBk^4GBVMAUe)fUv}ApmdsV z3#xVg2HRj z`50L*!)go2@;50;i*^I^1D~ao&>U?39+O^lCm3X8hGItv4--Cy@4}yIN^wOq_vC%H zWc1rHvf4yF4pE^#%Ymk&m}K!Y;jL&9z6n)fW3JA?yV)o12?ln;;TD6&-GPRPmp4@du9*!T%$ofJ14d*S!K+y!Jktsz_Zb0eGCB)DzF%!y5oIruXOan)l8wTFA~WTOpYu^+F?u4`CJp|! 
zlK()I{O9QX`$S1e*jb6hjehJGyj;K9k_J4Rt$D`Jw!?iVnrAhdQK)UWQ)R3bQCRE9 z-TJ*Pkl_BLi=Vj*Wi1|fYJcoO^V!ukhW-E?y0!NUi+3$i(?%~{(H1QIF_J6A+X)vd zs3>Z6dU0PKr|a|L63Dsx3K%c!;7X9^9L}YX4BEMq&D0!BdOJxG7h5cS5qz$HR};;mD@JMO@9r{ABd9w z$H4sacGiCpC50nDo7O*d!%MRTt2>e|y*-?~%S9_NulV&?XZzL=+=ExY~Yj3g7 z)1epqKi#8KJy?xgiAS5tEeGIjIvLkF&K{)Nc0`6FXdD>_GLH52H$lvY4CiB+?PyEZ zEFgQR7e!yXXR>iuV)QQ|O=i1M5VYrXVI>@DctcaqB$a33Nq5sZj@!HoAO(5E( zBT*gavvSZrPJ8s}lPrkiJ|pAA*M><&x8htP38K#B=w891N(_8sMcPY9Z2q=t{*Zd} zj|b-;TuA;-lzixA6u00SMb-x;we-#+V~{8#^Pi4x)l@qa*+=-1!euhUhJQrYHR)w&`53~^iGRvHd_hx2;+rbea#l=S$HzY*%<>(!|5vfXseNL&u!yeszDsx zTIgfv(hiB?w|F}4H^BZqZYCG<+rThA?nKZ~4DNNw>DWcr2aIwuMgvMcu<5*u+2ZG1 zcra+t)hH8;>65(j(h`Fxd2APBk99XT%gR4-Y#e~lr@2cm>J2#R?k}@U+y`x*L^v6n zRl=)lna29$b+{BNpk>`pIMLf_ZDHk$#cnNyq5k1Kc$TBUJJZku0X5364}}wPh?nn9 zIVHBi-jK`Hh5PGa>RzqE`At3ex5fPjqU3+h#=lII1m0TaqzEBo1!l4DTtsKd(jh0mHEK*pBrqm=>~mv$y{P z*40vR>=z&$2n6CXGd~bbwkydywkW6ISlV;u=dqbs`=YX~Y^o5)cZg6kJ!ylWMti?! z5{{Ute2cXD*&1vntAmc^cEc{OuT_3Vt-#JFetZkTMWdiv=u~4VHq%`Gp%&4JmHQ<~ zLKliL_s3Mtg{2`VxuZUFoTCAUHwCbEapeOmc6;2fi6t_tQ>hmvI#Gk}MZg)AQdo>| zQRgvl$2vpjd6HokDrkJ|xX$hbMM5qTk6AN-Kir5c$-NKndP`4P(U#!$EfF_9+xDV^ z*h#u0&pyD>{CuuT{SHXEpnOP4mH4xo?|!CFYC$Fy_s0yM5(&=~McU%qvY zNCwl=e!XeV2mD?=Su2{(!5~$8XMu)hY^r}Fo7vxvLc?SiVy1e~=xKnq*o_{1D>Snm zOWV;?+dQklwHX#@OV_h{Yw={(W$XIyC0H**=*=m+kW!t$)W4?|gp&hiG_O%Ma3J6CN=QH-e0ZR6TLKuS~eLM+)luaYiMnjKG2AK?Te0ohWDCEysJX z3YOfX#pRA9K>S=6#muWzFnmAfmVK=Uv^7HWOX+h!eUv@3ORogU{i+PF5#$mj)w%m( zBO!3=yENa}MnBPH%k0`_83~7^mP*`r5^itG1#0eB8)4f;{YWuhLcV?b{=o~_``32- z15xtdw&OoPg8v6mVwgo9Aj#JP)pv)MLURVNZJ;Uh$-QATzg$(Tu&WYP?WA@eB-kM} zDeh>yOagWluemenF#?{!B2lxM(a^HV`L{cH89Gz1OmCg>$8Av$i`Ix#nhC9Emx3MQ zagt8Se7!RnI=#4VnJvXart(nVuwfD2eO;AS&|D6ETMX^;#9T1vzB|{4i?z^_(c=Ei zrUzw*loCdNWn;{p-2N-MOvIJbWU^WbCnt_r#>RAD@V#%^$D6Wn zE_Zf4Gp7pqE(D(Kp71~}9uxUsrb0X(&?)Y2kO3PPc`BFAcH<3_&rz3!Nr;e;e|f8} z0m^O0VmuPs@OPnzCzsA31g_aJ)UH*-*@sgO{xr29?vPC9P(s49t%i@)q7nN$IoayA z^~2#84-P%qUJf@qOdW*F^Fc@xnebDYdZ<&9%&+wl9H<1#7QJ1`9YlTSrD3t$X3khi#6vYqDmt*Fe+1o>B2scsb 
zi%ZI91CgHYC(pusIKHmC7#63W4QEK^uG^e*K+Q&OKtL&suo2q4>KR=MOSh&BR+chB zQh&nXm;nhRRrS{-KWE~8`9QI?m)WRZ;7TnpTnzjAt$y0d6hXj#NB0YlQ^7}I+F{pt zHAdDJ9K1pp!m`ONJd3$ljsx9fCrA+qQ1N|l^dx;cW`2~KijNAy*=X4d3^rAe@#yOYs^_E8%$B*Upmw;i5^gvi|3^b`M!6r8n^ zT=x|&fT+QCHxo4j=$@sb4BA5=CmaVgR*Z;u+-e1@gU$!M8#*VTML2dHPNigR4J`pt z->sp$JPXitsPUG(Rt!cDQ*L4Uk_C%b&DZIcGLZA^dr?P&8lX7SuwY+NhsurRH7sj{ zsfMF;x}A3hibp(s=OdpBhtwneIgR77w$5ymFwvrrW9Yst%3B8CsrT@QUoS%_eikn! z8i~#yY^FQPs(}~J9o^Z|h(^EOpW~`4z)BD2U3Ss0u*IU4X5RqfMPGlJg4^x*aPXOR zOjD=QMAJ;WB*}y5_UsYx-xh@5;n2Lua>2(Cb3DaI3=kyiNj~+yMeK=_LHpe)IN= za)LVzIly#5GYdGk>vHESmEgOGrLgl$b=ZB7a>IcTscKPEsi{b(LnQC-#1{$~$nWXT zI206)TvUUMN~C09vpH9}N%0N#o+RDA;g$p9?z_q(HZox#SEZ^vq#jKNV=1M#RpH=; zSC12nBGETxcdYDiA=G(uJ{T}8g@!Jrpsz1eP#O5U8CBEZR;Oj(&gB|7_a+<(p9K$36ggOK$-{I}aL+4^0juzVT1v%0{QX+^GY!!i z&PlWg*mIGH6Z$4dCSV(ke#uD=eT#^@CEv}ZzpKV_v7bW6OVZJ~?X}*%fI?{SZ0<2h zdxxh&ctbdSO0np=nnhgYTjUCRXt+PU08-3nrkM`Lfx-h#$5Qb;DCZiE{2uEC-aTFu zTtw@r_KU68JM&Di7g;!ONZW>s&!2reR}qgdMT9SXB7Psqk4%a7O9nh*HCMezAZHEh z5AIu8`{NhMwKlz_c6c?Xp)5^^TR$viMqSmY#-3Ks&A)rgA#eBalon$p-nHA=PS`o1 z`1bmO&!R-GwQMG%!Gkyp zVc$~mg$$qe{2>x<)^NZ0{c<{J7MM%ZiX>prZL@#|eG=ZuC~6$;O9h!^)fxXR;>_N9 zjb}AU0NhB4UR1)a*!-T#P^+^7BK_sRaTAEvsK1()!_iW3q06f786`oycu0FwRSp*4 zNtfIzRSoTHQtLE7tFXzulA{C2Sj4YhKI zq`g{&F9Pbp>Z1H_Q@a*aFuvrdY?F=Mtqwx!F1cvmHQnhcnglG2dy8+<7XmxQRwrhS z$9RjugzTIak#jpb%S3xN498O=-6tjp8d910hsEVENT$|(&&WlD`Re`J zm*tTG&+IoGc%2GB<6KU-s81&Jzh*h#Otj+9Jr}Iwx>y5Qt_90Ed^n`w%D=H$7>oA(issFVc;R3y5a@zl$7hG>IuXhweq`w8T3@^f?Q}}BV0am(3^u;4+~5=2V#5+;9b2O%QAjSO@2^}ljgik>WJpI)oNegix{B^2+ zCA=ILh1D;rzUsic3%=JV!x4@Z7pr;mSE1|qt+9TLB}Bqup*hpGLbMT(srz*=5Mhsu zTHmfr5M-m-9lpsEhqp{ppVmtNm&qDigF^-QEUAv>-t!FjX!@i^q^b@jCXZ0%sbmr1 z(JMB)dQ(v^W#^>6J7H>6o;77`UINw1HUnQa6@jxwEJ^QEIaKeW>)uO4#E3PE2a$az zK^K>}-A%(Xyz)9%t;O^mGHS@^d&d?)Qs%SSvQ-kyy#KtK^R*qnFpYZ!y~#m|Ey@Rp z0~;6zDo^{xFHgG5(eDtjJO}~aB?Um2n`N4*m#wV>;?_YtT`e($q%QT|)(8*(wggAd>ZxC6m zPZk)nx^=v#ibqNhSMmK{bFp!;^|M26H405+laB`n;5s!$jt7q?;_$BalQiW>y?C*!J?`0AUK}qV{ol 
zI0IKxw@T9xW^l(Od8H;}9iZdvLf3n?ToeXv|16#YRFS&~!WON-{aQzepFJB-(MV>A z?<^s7pBwit5zd`eNBC-9U&zAW@y*)qk99D2i_i6LA|6a4xx(O5QWdPz&pA9)?Ldjt3--33$;11vA-9_aJw1ZZpOhWG{dEmo z+r~z|UXli}+<_JQ`ke5(-!TnxiF6#hw`KRkhZ$h7cd43hl{mi=Xo4LK3sF-}`MTAO zWIQXe(Z{^!J?M9+YVb)BagM%qzbk`{;Y8;8*UryefX9}uxGFW8m5p3f6ikNmBiJGQg|v-8+&Vp=hH@O1Y2IS{5FzGR`aY%R!} zqwekUq8JaJ*rIR4nFRWeg6^c>uLsNUT~gGU*|3+cpjlNp9qd`FlM~(DP)fR}fL%Wk z_eVwOO25kohUUtBQVluae~E5quc9Y%4MxTv-b%u~xqL^&XzTIJQK_Q*oMl z=XD#To7dnKY;h zRq6WtHUUVFzF(go2#2L$bC&*x>FCSYuIph`2CWwq{qC-(;DCUaxyWiJc)dNRGc`k0 zA%1e0kPed|dhk=Ds%SW5Z@r`MR2GkV&cjx-ogbiTM>9vo)gl--u=%9oOT?=+e`Ga% zScX$#TiB~apFsgPv4OQnU@L7T8{-{;I@B5aCUYwB&BRG%sqKl_YAn%2sAO@|>sh)A zu@q3+c0XmvNHsIUF zRviaiJo&ZdQz4iy|Km!1Q!dP|Od9RiEJ6EOT~!YrBBOXv&F*+pAQ&}Nm`&;t4z6)( zQA=!@P)#!DQ@WN31yftR$4{Ce+Y_(Y6MiuuKh)&&RdPltnH| zGqpUp#TJgSZd842Co?drH`Dt);YWJPa`HPXg%3D296cg7oCJo{d0;a+AG5y;2uM1L-+aLi7gll#-aaV;>h7NI!qb@;7p3HJ+PDH}ACXLIs4MVE zhmN{7RT&OmJl#t_nudJSZ>9EHl)?HTw;i=QCAh)cJHnnq!lgH|?^5XUP|wG(uU3eN zF?#2!6!6gtqh`NGE8MijS3GPD=fX;H=xEKQpR(~V!zp>wjoA&ixIaB=-j$ABef-A* z)goc!Rzl0GLnSb%p7ZhQr8h9gnz)JQxh*{Dp*qCE^Bzyo@09;mcnh(h0q*w zY=*fr4_NH9otXq>p@a1xxt2>RUNEwH>p=>G+sbu|-W`cRI!lt|J#`<4sV7!MiwJpS&B1aszTTfDq!f)w(aO71 zzNVqTlahah{#0{KY4BNk z%~!KG_^+DCVN?A_cTG$m>YJLKdZ7LMKYskTLc~8Qh3w)LeqG%A94gLH<()S9hCI(- zG$o%~!V#0vtGkN&v0_n)X70^A{sje&W_T`WsVl>x*jV z-Vkq8JL4m&xwj2w`tDnm(-U42-g37(N(hs#q-z2ex#spYy@f_ zvuX}T=7ZxW!Ndz`kNy%Mja(Mp4si?&m&( zNy_2WddCR-R&=@PW_daAY|>KZbnKB%#LgI> zf8?g z-a37R1MOP1?WRUx-7uQ}88`;&neKEv68$LRW8l3pT>-+MyNbL%mSBLGpH1;XBCyo0 zE(~PHfL+vs;lutJnB;x4D&}G@G;O`KwAr#1di6|a^p@*Uf$O+_{JjROir>6EJK2q2 zu7r&&X5}t9m%40pSw9d}7a0Y< zTnO64`f2*e2=ZyXcGZub!p*Hd#ixn3?}+%rb&9*msF9~@UruCG{jKwK{C_(Y|N3Nb zdsIj_JAB5P(`NN&^}12rEw6$6RS!Wn)D_tF z&{1wOoBS>`f{9>r?Oapd(&)|-PS4uq(KBZaa^P>g`sC$^0%7k9(p*1IjS zE7MrH$>@Fb)p49Hw3^lb+zBor+Zw9}Yamfa@rcn-C>)>eYBm;YK(%Q#u1)J7@#XRA zr~R+S@ayR9+BU~FFlg-dNt*1$Z^6zKzd~N4TqnQRbFo$k84paPicbYEWy&6{;Yuho zTH-ye6png@WqmR-rEv0*;EZlNVWX&17XEFt3(|QQ<-bsO0QDM$>^b%b*bq{UN^DL8 
zH?@l_j7~AI+>veNY}bV^ew6=Ey5t46LM@r=RylBGg#S0USQ#wE*eobm|EF!!ANmf& zH{yS!M(`PE61fq7%Z@Pm$KerF@|zue?(J}rn7@B4<;YX&4@eWGZ@s|afyQZfckS~} z1Bq=N6hGJ7P_DnGE+eQ0GV5bXm?)~SQuplS_OcETPu}%t>Qps$^nbX+F4T)piuUIz z3XX$Z)z`;C+diUxg-Wc~NDdl0?XdQpXoMc7!~IT-)hOe`%16;k8dm&bVyMK(mS9nJh_irA}xo+rzkt4I>N&+?b)h_GnndwT{b|{NsO0XBJ z4*!0md8ZXPk`%vE-0g%Xd&R~-rV*})I#i}1zUdg0+WeqeDhQQXlP5218A7SU4Z>#} zs{i@|l|9mIV5&wUxg5-DqucfI*tR1gx6%r>x^D z;JWuwn$K^EOp16nxfIGWWa~K-!FDAKyDT=|$Wry&h?pA&2K)X9gqw^MBg_xC_* zl7Hr%=y9Yb+cOGJ^Kr$md2Yd$$O8HhbkzPtGZZG!`ZEwYqHj;@#Id-wAcK;{XzkZ> z*x)05tKhA~de)aWqz|>g2@lD+E@GZjDO9vy5Jm-|iJG5zh>KeA!rdPV-y6|T<8sl2 zQ5$S?G%zFYtpU4(D;wSZMDoc~e!s_ZWw0STDo67<18yux2A%%d2#$^R++i~R*?s5z zw?^k5TulDXmhf@;jc+h%{E-R)vpa6unH_Q#S@uj3gX*aB?+T8@6t2BZ}dR# zJ~a)`@KH3OJ|Fihs2juCH%1?vAHpEMOY?~gLvU&C>LFwEYPcR763BR@6vGV8U1xG` z!gn;!EIe4!U=O*mGxObtkSIj6<-TGq6tEvX9CoZ$|-(3TF#C>YeU7$x#p5Qam)knUlGxx#PR-VI!^;yVk z<5<@mTY$2+mj<#ed*SX1Z}AvmNcWOmfIe$9@vmj}hwkJ5Z94vCzT~TffVQ;MF!tXw zVjbQn16f{~4W5Al6gmEov-wRMe7O3dM^`c$hmGH_Ut}kI3s`xi&Yt~%AuH}9HxK)Q z!swpu&J_(%bLvJaccdMtSzX$`#l8Y|NP5kB%{0KTEo`DYDeUos?(XYrS}icOc;w42 zm3GK$BEM$P+5wt9Jm(Y?GGP5Q_fIQ{Ot>57J(Th(2X~*E)V!C~h2@fstX4GvVC!@} zIx2<`iQaif<9@UbRBHxg*d~j>=tAUxS7aMj1(Fp;yOiRoOeV(cVMiprel`_D)(LY3 z8oCRn#c-xyINqAC3k8Kvis~3=qn)eES0TYH(DGb;etWSL!kpMu2mMGOXL4PkfFmEP z0-ETn2_iF!rq6*=kYJ6^Itws3R6(*~=^M4eP7qrpPvmDQ#zpg zK5e4wiMiK`thpi9w!~#IM(B1rTWKHY_q6AdD>-1`CVEp^d&1ksf6`1n| z0zozIWZ4$eC&2Q_GWV=U9nxIY5nm*1ba#CYcD9o(LOx%Gu3H}qK&(ZpOStqA4Bi~Q zHx-wSl(gxiK|E~`mZ!GDBb$cS?)(q-a8;oV$5R2yr)7lb*fFWBJ`sf}xA+`-mI6Lm z7U7>NJAgbRpqpc<2+s=~5+77Z!`%-1g2;=WKwD9kze#2c3_UivHo_W({;mlxe`}3^ zhBEVvO%(}7>EvoR?Hk0W>YF!PinQUUz^`?Q{jE5|*dcX<5Tnwyc1-8)Y{U1G6Jn={ zj%)i%PTGB@gkexey=)UZ5!;Z<`TO-sWp?`BK9Aw-}(&0?qpoDIb{&Q^s2^NzSGwX`twW4hq_>D3!N?$rA+Tf7p- zU$;g0x|ibfys36)K_b?`f=9J{R|Om%I&M0kp9|u{M@3e3>!F^&9K0upfMgM!0Fj0) z0t3?#)p}U_*M8*w4W9_k*GV%i|gf*w1qad$ti z$CtGHTeeYV;r5N1Da+IL5XV#|z+ci1ZyF8t?dZGkM65j3TuVHXqBG~7PuJs77Pzfy z`4S}A$$~t$_Cq+Sl(Z(>2QK2DCQXK_fQgKKv$$6^CNGQZlCEn*Et$uPE9KSjGc8up 
zfx8u-zX_ndz}kV&+x89D3$-JO{k@Jli!Vq={mOXsbgj52#!ZXYt^sThynOK=c)#rE zvY*70Lq`MiL{TPm=V#kb2kS8X0os^J^x@@Ij_KxK?T~H8&erg01YIxJ_qP#E zkq;u`xfFZGp$nas!lBXuO;;d zB>lgC#s9A*^$$+@zY``;D-#Vl0!CrxYk5vq$_h#@@V%~~8iCU!EkQ@E0X)QTXi-k3 z8@+>+Lr10;!7us!4B56Xc!}fY#etAE{M(!IPh}O}f0HoTQ>jzYQny8A!2_x#=9vgXNltG#lnZaJa8tw;@)Jy0X7FY?3SSDyQ3t zPlPj)H(UBKFZV)ZC{1O^&M3uAjL_hwoec&Y^%3J0DG*oPc*r6<33{lBK-z0*AoR=T zLd%XuwA9{h?wl6^$0L$1SBSKL#pDTU+aY7rJ7=W##V-Ij+wampXUf8Jhdr6Et|kB# zN9p@lQgPs(C~z~PBomk?q;Dwa+hfUg>$Jss6sjPlYC>kVd4z6d6kz zmPDFSA|(=brcTz4!iA z&wBOrSTAST@4eSvYm=KI+f*5fuhiIRyD<_@dGHF^y(RnifKQbRDJg z)WYbtI|FvcSe}-dpN)-HaUWbwuA=D%&!4BhH==&Be9&p%1oTlH$cgf5#Bz@Nol6(f z!DiPa-KHlP&qN5A@bEEFVEv1-kngFu{0pbf5?zpvZfMJ}ajW!aC}oXv&kIo}83 zI7*5Io!K3#?blKyueP7(9l5#oTYxMTWh$@FG zd@kpl6PftJb}Gw7x)8lb3iAX zLrn$x{9fj~nB4{H4_7JA+K}%{?pEduS34%oq;=M2gu~|EHhcHOX~6ry_U=S)J7o2&#?K*14zuh|6Ux8}TeQ$hYioiwB!|Rv*ZP?IGPq)`k z!RvoUTi^JRN(RxF_xeL)peu8=TYy9qX;5eMtdV?gpDp_3#Xm0^cF4PlFScdEyYbwg zNo1Unnt!mLrbGe8i7R-O3Kha#@zsU(gI?eg9Gq$z9u7&-yirNRbgVodWN+YF2th{D zcbG+0sNU_V5@k(ZcbSfkRvzW>NPpvq#1AjXSU8jE7RbN^0lLd^De}FmuYPIE;+yC* zz4M&mvRF`cUUqi7?=?K~r%rrKr2qnD)A_glAdkoA8IvF9hr^zg*~`0g!$D`g(6q#* zOsp$Vxcyp+frd54pWa(d?0F<}g7-`*6SsegkosD4{ji zd4n|Bui2}(a)(G9)J%4xeOePbY^r|jDppGBkuwDrk@nmc+n(SIdj@Jc9Pkm|TZcww z%Y&x!>u|#TL(HAv1n4)Bv*0A_km&EXue>K?9A;NBWKBu8Ot-4rq+^~8?znod&b>Sv zyuUpfka^66A9H3tnG6@AwqeHHKN^VB!H?38>Xt%cj53zI$wZ~j%6(P`3UH@##ia22 z5-cKD!L-INqDaA3{&{iDa9nG_g=m#D{IgwKRh2Z67}h##LZd$fb%EeB0hdy-&Tl@i zxMw~t+`ekCWz%W+-Njj@IuQVDJ8UNpIg*o(!M7T}Wy*nOvSq{8cVtYAI5%jcdGDmF zGj11vXe`!xJk|sDjB2B8vZSutdXd)>0d)xZI#D}iRfB;B>CrO5ckyP*GD$u1ee?3S zwE!-N!4kVoy7Y9i(=JptEu>q8M}&K)0>&E1PW*SBXJ0B%BcWin` zA^!0_DDvV$G)`UHe@$zCI#%x!C|#*s4VyNfzgvcNC}0}-L7TH2ByPnk+`3eXu|qK) z6YH`tZ;>$jt%-6R3J|)IpO_EjZ9I`~5l=u}=Bv?i$qvvhGAc-tgdkB8;C3H?G*_3)Af~y4w8t&{}sg=Ee__&{PFc_KzM8$8Z$0fi!L5-7bKBpTxa_> zj0i2u!nPENy@3OTaQ5Ti#RTaTNYYhTc(K0}ra96?W0Om9<(0^km6Cx_INM_&dSe3` z`5&$7JLU_|>8&aEB4V&<%Y{WdEbB2gwBl>CUj~-S 
z68Q0d_C3*%iD4#zTE*u|u;SgTWglFhqP^%*<)=#7c!d#s^^`*>Jd~N)7f+fKh#gtj zX2gCE_iAn3r?tT`O;!>rhQ@sDX=WaX!dt zGBrf$0cdocVMsed#;}wy7fk+mj2sWH9h4xIl;x^p`%mw#2mkBtq49nB@JggXSFz3> zuk~Ht(0`CD(jAO%C}%$fy5*9eTMie2_@mVgAq7tm{w$x;RLMcDMQRV;>pTR{^&tlq zLn}TrUAB?SkW6emlKAzfRym}mM!OA+RO9%mTlJg#=^)oW)0{mb79=|QC~-#&HkOf(k9 z27|YiIA_NL4X#u1@ao8Hc$u?VZTsr5 zaa*rX!FC3S-&dXfbFvx#I9<~`s8)b$SA>u4dJ={PCA)NTBVyn+d*snIq~dSG;MeBR z7f$R%Ne0A)-%u=cVBn88ONG5327;5tRsB11&iFobAy*sOz<#d8rBlXVgpHZg z3c6hlpe(iZ)o<@|Fj@5O*qK$O$k?(iZL41qh$eZhH73 zJ5n8MtXu(#_xCp&Ua7_C?2*L0%|+lpux*RqwWsju$Zz-6pRyrxiCO2m7h&k47%=(9 z=phPxO&3$K&BKMCygP2@C1KpvH)tSY90ibFqFAnwR7Jo0UM4oYo0WxQp$6uh=?z3Lv# z#G`urxxb|lxnu8Gdy#ci`wO2=f722?oUb+aUS=X>oGK2@%xJ*WbzY+D_LsvPPXF{V zc|-{>AF*%om!Y|C)sb3<9N4tP?#6HOKEJ7vy5*}wHtfF`<*NH50_iy+=kP!$a0;yI z?IQaz&CAbv8Ry+m9dU2)QnZN@G5DP*m>&AnC11f$>R_MW5ZktP){ z7M~o7g#^CH*UITx7_()Amht!*^s`I4ydgFdR=$0GXBSTcZsU%~P$f;D;9bd=vlqzc z`{$h_0&Gpd`PR0B{Zb?NSoPTSa+78i+xot*xmpgUXHCULzo){n<06fgmN_u$wc~fq z;sOkt(^`}#oevwAE`BPIngyZDBBVSND`BH%2uPIX!?}6KxpxIrK#o!LfegJmoU!@db>e&?6bK3(T(-6q zHSDui7`-S&4*KS`5C70%LqJ+jynPltb{&O`$!7GD&j<>YCF2sE1V3A!NJYug<(5G^ z6ERRbe88Hc46?s`HZTs*!KY(Cn9r{Jp0KxM=0Is^?t+)QN^}A7T`MUARq= zU6?Jh8P*LPDpkV`Z#80e#3W%E|HU_A#))_!)_IHK3I>d{x_sW%Lx;sHj|LC;M?!*W z&R(hG474~Eu!pCc+(7vDY=^m61#CBqR#WWA$A^j{dm?!XV4a}s0-vTUAa58F|687p z-_Smd zS`pOA+nc5PF|ltjdrqP0BXBA_xbBN%DyqBbmVe!w2{&G3=5El z;W!s&tV3A2-mD=HcG^+@~3QBPV{L)*y;Yjo5CpFG3nukhH5Z6Gh%v zuAt`^;KGD_&GNKXs9KTr(wTJ7k}%WW%05Xx*q>%3zB_am|JXqR>V@<3pBhm2X) z{cZDdhBP->uPQD#lr)It`^^4~RTtx>dmf8#?resG)iF7qq{+fpwpX5+TN+Sbw4L7P zQvu&z8f;(6EQMsvx?gM>!#J1Ci6djvJ>VO3(dPF}!*~@ZCztUWyk5XnpvYDSPh;HA z3;%3{&DTtFQ+0~BRf@0W`%ZM5&8%4Xn|!G^ z37^WjSq16NBYPvvTjA^UVyv%!0m{4&p8Lpu#?Wsc72kO&8rddiIZ8##L3iQQ z=CWNGsARtTruN4gn92=0`i49s%>S+`@@HWp7L_m9)@WD@POEMmn!B15rkp4--`V~g zzFcZrkGZX2^z5if>Zxu}I(|YTxiu5Bq(1ibz9l^s>k?3J@Q>1PCtoD~SGc}}W1 z&whE?QWAt(K_dr6yo%76>AttxzZp;R9!#^Be~kT+!KdurWn-y$YwIKKF7(>^gG=&p z7e;be3%~8Z4yPZrH{3~LVw3t2%`I2E|9NMyI+Oa(zmEU*nbdWY+m4<5y}&zEVkV9w 
z_-eODhK!pZ91>Eo&`d1`bd>(s_s|zVE$NTzu`Iy#6PX94-ouDbA%hwSw|QQ64jQipG&B7+u37vcpSVcz=Qcxb)1EOGi&A6jmY zdIK817|j!~XyfrN2#Kn)VQ99&A;Zc%ZE_|QEbDIb#bp3`v~h!_{3xn*t6#FzWMcd> z!NHV;)ev-8*8Vh47p#iBleTnT73^79x4vJe4UXO!IHAhZjl4^DF%Od>3y~AWa<>B+ zz`K;)UuH!KXdiF57TuW$=Lc5rOFWc|cHfTqtWxiU(H4GQ5Ay7t{Iw}Zi8MZ=8)z6X z9*kmdC7=}g|5R_JcK_mfL*O5y+pqPWv-8m$rjULwewAmPOT>V?W@09U%nc2-hG*{zDue) zow8-hWMkpdW#b}6J#u?XJ!0+Zf_&7~nwnq#S`C~=P zL#{JRB$PB`z*=Va}CXS~*&=SS!(CmWK}ewr;9AX#EI!!>fm< zoEq?(D_nW7I}7$)JblSgv=!N|2}vCrYR6tbiw)oJ6k=xP1k==|8SOKj)rG9`ahdUa zUQ>DwTA1j$KF^HD5aqw#?&;{kM@y zOS`Tkq{x89F^5KZhbwSsX~B4e_*KjcTPthwGyqpb?&6woX~RZ&Bg6gmq%~K7>4(s#kRMEcH%PG2X_NCNVWcNiPMaQ?eJrY7UAvKf+3Bv z$I9EE;CRQI(i6TFSQM$xbGOzLC$ln@Z9kFQXrJC)K$!~obY^{kd|4C7F?%uvPgTKr zCEj`Kvx5J5Ke9TL`v2aK|MvjF%Gln{;s(upo;lZHw!1X*1)j9yG_k|%hyT-=)QjHL zc^ic)AZ^q?UX-l{Z$I%Aa?~$|#(5vZ=AG>Y>!#{`f?Ro~& z>)3VPn?%D3wn4vm<2(!=i+t*7T@IaDE&__1=n&g)W574r3bXECRa3|@y|<8^uuDJ( zmI@W`krTSr;zDqIa&&v}wl|NA622 zt*!ui>+jtWGZY~92csyUHLHwoFbm5F0oH1-6{GxIb91-Ht5;5=^7@K z$-dmY`T+yT=DC21NIO(XnQAXoh=oa5DAQur2i?-L>%YdeB6H}SV9aPIwgk@P2`ATK z$ENJXA=mREr%kf#fm-a6hCNjx2Zo z{B0vSX7-f)<9odVJ_tTq#xesUP;bATZVlPCE0a5NsryXd)Ko%lVbIf`WFkaLm6q&!sXMz*Baz=BXAJy zuG%d>%kdcx7xbTLqL`{2IAJEzg~(*AEu(dYmCEZgiI zZOx3#tj?R6np#+2lxFcaBWpAJ|5J!8LtSJy=kWZ0itOCH3|3VvyZI6FnZ>=_+T<1= zzhrw~)F97o=8`1s$7*fXA=~CvS4kq-ECN zEvuB!4&5nu`%+oHqG%GFO-xl*a=*m9vl_9(95Wae6|^?^%}eaBc&xeVNfs!KB!@Zh zPT{{xR`{PL_AhMzck6tp{@uU-P%AIW8eKIvCdTdUjIIAJ@}~AF{2xA5+2*scStzpE z7+5pW-NR@0Cn?{V6HM<|^SupTo4!&yRZ0iO=Uwd2Oc{RjG?%^4xP3Jq zxn8BShm_pI(j3=!q3>EjWp1)n%JpEpdg=V$R_tfF+sdvI`!a4BanGKzz z*N05|Os}^wp<}IUgY6;pZj>zZ4|u+_8M$sSCTnIp$u!l&0RrZ=*y4SEeUu&pe&p=n zG+keh8`D!%^4_Olz^dkZd(+93(gksoYkA4UGp~crwFAw#==LlB=I>;Qcu3gtEqgQg zDEB{^d0!3|Z<+=sE>z$F&%&_m>n*4#|4`e2*$v8^I}(ONI?$$?O~C1BH@3{~-}yDL z8GAUm9vTkTqswxs9p`2rqmU9qs^@7P94(aPty@|Psb+Wi7JX`maQc+{UIJc?cPQ)sC3@PY2BHPyS~0!GgIz}Q9Wz&;!~MuwS@L(178`UVeg_#+F77Fr-YrM z!$5m^&(E{1q=la1>(Ld*Xm~&=;6CkC0=&yDT6N%KD;|;!;BOac1GliSxnk@M!0!2t 
zCau*851HCW_{zJmC7Ml7jXW`}5LuMDo2?n9weoyvWDrekX1KoRz) z3%xb{R7|$SCN8ZdkDXi6j~Rat>c-t(*Ix-8tVaPp?J3XJA{1omcP_6X-(L$Hw;t-P z!G5u3zP(pk@nT_<6dwnIU!iSL>iI%!(f(X~`*;r;Itm&2-@OgL7Au@&i+GA(KIrdQ zbef52qfDcvAa_ydn3kY1gPC0Wn=5Uy~|M7D#x3)fxGX?MZ=`*giDB8JoY}$ zZ&}NzK@rns@q#OuV63wF!>7DX=#I12=`$nS{N{CkYNra}rmdKPLsAcUT)y~mD7*(Y z`rW!bA|GOi&zlF6kjuo zM(KLFPdp3|Sw6jSe|RV4co&?Q5i5sn%_dPRo7(aAp*EV7)pJ}vFymXBUJlzeM?6O? zQ*iSMyBny{WHF|ZuyyY{^J zK%Q(jWz78Pi><|-Pgb^x%~Zp1sPBdQBjok2X{hZ%{xrC@#4PY+2W-lBF`03%hx3J9 z!N(?bKTkB0P8kxq8AEWzz)4iZP{j5NWHIWTbM@%HZg-IS|jNY82Bme zDIZx!1Y5KucHq<7(t^X=n=vFbVAmFsFQ5CtwSTd9RB@ua|`dWRgn<$Yt!PDt$`^u+II}5x&0~B^94V?)0pHm2{iVFXZ>* zu2KBs?9D>t+j;rto?&-*CpDXNRrV>CE9{nb?aF~>hq&{5$2#!5$8vVs=O(;c?;n5L zu^V3Vz4359#DG;}X<331oiLFz8u+m$7aebz)n32%961w)9rQ_q6UntEGmZAmC^OD@ zINCsm-mNz4%S_YJB_$~F_;V(_vW2jq^UuNQ_o(WAe{zvc$>i{y);5$CeX=e!umlym zcbr^xBcCnP*@Wu3dh>I1a;RnfEjN ztKi!#YR=o}gAp3n_o;T(LWuac-(9LPFlDP<^RT!FqzpODT`rQgT-+tRh9imptku?> zu(!KrVt>uf%v8rl%h=A?%FN!(?il$)MTK^gCT7m=yZoY=wV9oC$n)yZ+VR@RlB#W>D&0LN}SdnJFgGG27&0K{=cstEpgGE@0W`2xC zcn8h=q%ZG($x@c2X}~H@@-$%;Cy83Jijz#Qu!@sZZ?KA!TwPhkNwW7?#Ywh)tl}hH z8mllYGlq#Yw)^tl}i!Mpki>ZwIS5$+wSH zoa8&qA}&wzeZwkF^8LUnPV)W6Do*m9WffPTS0z*f>Rjhz%nO zL~LB3K*WX>YothQ*i#^4!eF7h}a0GK*UBQ1tK;QC=jucPJxJx zT-InCu~AHch>dazL~PVjAY!A10udYC6o}Xuq(H>RD+)wxyrV$G#xw;YHh!>1+ldXf zIV|7H+ldV>3Pfxyp+Lk2KLsK-)>0s1V*>>uHY6wzu^~%=hz%tQL~Hb7` zL~O)RAYvn#0udXT6o}ZMQy^mF83iIXswfb#(eO9oW3g%?I@+li@zG1gh>(|5j2IcG zVnoRl6(df*QZXXs7ZoE`=FX+ak7!v)#fX<>6pSn2%Ni<1 zyogXS;zgW-{k?pZp<=|#b}B}^?4e@Bi#in}UXD;P;^hPtBVJBZG2+FTiV-j7RE&7B zp(V#LcMDn`77QZeEsnu-xGNmPt@$)IAyOFjkrdr4bL z#fXkEs~(5>CbBY1ghv F{~OeTgaiNp literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/test_autodiff.py b/source/tests/pd/model/test_autodiff.py index 1bd9dd0d0f..8442844a24 100644 --- a/source/tests/pd/model/test_autodiff.py +++ b/source/tests/pd/model/test_autodiff.py @@ -60,7 +60,7 @@ def stretch_box(old_coord, old_box, new_box): class ForceTest: def test( self, - ): + ) -> None: 
env.enable_prim(True) places = 5 delta = 1e-5 @@ -86,10 +86,10 @@ def np_infer_coord( ): result = eval_model( self.model, - paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + paddle.to_tensor(coord, place=env.DEVICE).unsqueeze(0), cell.unsqueeze(0), atype, - spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + spins=paddle.to_tensor(spin, place=env.DEVICE).unsqueeze(0), ) # detach ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} @@ -100,10 +100,10 @@ def np_infer_spin( ): result = eval_model( self.model, - paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + paddle.to_tensor(coord, place=env.DEVICE).unsqueeze(0), cell.unsqueeze(0), atype, - spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + spins=paddle.to_tensor(spin, place=env.DEVICE).unsqueeze(0), ) # detach ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} @@ -133,7 +133,7 @@ def ff_spin(_spin): class VirialTest: def test( self, - ): + ) -> None: places = 5 delta = 1e-4 natoms = 5 @@ -153,10 +153,10 @@ def np_infer( ): result = eval_model( self.model, - paddle.to_tensor(stretch_box(coord, cell, new_cell)) - .to(device="cpu") - .unsqueeze(0), - paddle.to_tensor(new_cell).to(device="cpu").unsqueeze(0), + paddle.to_tensor( + stretch_box(coord, cell, new_cell), place="cpu" + ).unsqueeze(0), + paddle.to_tensor(new_cell, place="cpu").unsqueeze(0), atype, ) # detach @@ -177,36 +177,35 @@ def ff(bb): class TestEnergyModelSeAForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelSeAVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): - def 
setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2Force(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -214,7 +213,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelDPAUniVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -222,7 +221,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelHybridForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -230,7 +229,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelHybridVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -238,7 +237,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBLForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_zbl) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -246,7 +245,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class 
TestEnergyModelZBLVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_zbl) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -254,7 +253,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinSeAForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_spin) self.type_split = False self.test_spin = True diff --git a/source/tests/pd/model/test_descriptor_dpa2.py b/source/tests/pd/model/test_descriptor_dpa2.py new file mode 100644 index 0000000000..e42585a433 --- /dev/null +++ b/source/tests/pd/model/test_descriptor_dpa2.py @@ -0,0 +1,207 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.pd.model.descriptor import ( + DescrptDPA2, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestDPA2(unittest.TestCase): + def setUp(self): + cell = [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + self.cell = ( + paddle.to_tensor(cell, dtype=env.GLOBAL_PD_FLOAT_PRECISION) + .reshape([1, 3, 3]) + .to(device=env.DEVICE) + ) + coord = [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + 
self.coord = ( + paddle.to_tensor(coord, dtype=env.GLOBAL_PD_FLOAT_PRECISION) + .reshape([1, -1, 3]) + .to(device=env.DEVICE) + ) + self.atype = ( + paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32) + .reshape([1, -1]) + .to(device=env.DEVICE) + ) + self.ref_d = paddle.to_tensor( + [ + 8.435412613327306630e-01, + -4.717109614540972440e-01, + -1.812643456954206256e00, + -2.315248767961955167e-01, + -7.112973006771171613e-01, + -4.162041919507591392e-01, + -1.505159810095323181e00, + -1.191652416985768403e-01, + 8.439214937875325617e-01, + -4.712976890460106594e-01, + -1.812605149396642856e00, + -2.307222236291133766e-01, + -7.115427800870099961e-01, + -4.164729253167227530e-01, + -1.505483119125936797e00, + -1.191288524278367872e-01, + 8.286420823261241297e-01, + -4.535033763979030574e-01, + -1.787877160970498425e00, + -1.961763875645104460e-01, + -7.475459187804838201e-01, + -5.231446874663764346e-01, + -1.488399984491664219e00, + -3.974117581747104583e-02, + 8.283793431613817315e-01, + -4.551551577556525729e-01, + -1.789253136645859943e00, + -1.977673627726055372e-01, + -7.448826048241211639e-01, + -5.161350182531234676e-01, + -1.487589463573479209e00, + -4.377376017839779143e-02, + 8.295404560710329944e-01, + -4.492219258475603216e-01, + -1.784484611185287450e00, + -1.901182059718481143e-01, + -7.537407667483000395e-01, + -5.384371277650709109e-01, + -1.490368056268364549e00, + -3.073744832541754762e-02, + ], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + place=env.DEVICE, + ) + self.file_model_param = Path(CUR_DIR) / "models" / "dpa2.pd" + self.file_type_embed = Path(CUR_DIR) / "models" / "dpa2_tebd.pd" + + def test_descriptor(self) -> None: + with open(Path(CUR_DIR) / "models" / "dpa2.json") as fp: + self.model_json = json.load(fp) + model_dpa2 = self.model_json + ntypes = len(model_dpa2["type_map"]) + dparams = model_dpa2["descriptor"] + dparams["ntypes"] = ntypes + assert dparams.pop("type") == "dpa2" + dparams["concat_output_tebd"] = False + 
dparams["use_tebd_bias"] = True + des = DescrptDPA2( + **dparams, + ).to(env.DEVICE) + target_dict = des.state_dict() + source_dict = paddle.load(str(self.file_model_param)) + # type_embd of repformer is removed + source_dict.pop("type_embedding.embedding.embedding_net.layers.0.bias") + type_embd_dict = paddle.load(str(self.file_type_embed)) + target_dict = translate_type_embd_dicts_to_dpa2( + target_dict, + source_dict, + type_embd_dict, + ) + des.set_state_dict(target_dict) + + coord = self.coord + atype = self.atype + box = self.cell + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + des.get_rcut(), + des.get_sel(), + mixed_types=des.mixed_types(), + box=box, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntypes()) + np.testing.assert_allclose( + descriptor.reshape([-1]).numpy(), self.ref_d.numpy(), atol=1e-10, rtol=1e-10 + ) + + dparams["concat_output_tebd"] = True + des = DescrptDPA2( + **dparams, + ).to(env.DEVICE) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + + +def translate_type_embd_dicts_to_dpa2( + target_dict, + source_dict, + type_embd_dict, +): + all_keys = list(target_dict.keys()) + record = [False for ii in all_keys] + for kk, vv in source_dict.items(): + record[all_keys.index(kk)] = True + target_dict[kk] = vv + assert len(type_embd_dict.keys()) == 2 + it = iter(type_embd_dict.keys()) + for _ in range(2): + kk = next(it) + tk = "type_embedding." 
+ kk + record[all_keys.index(tk)] = True + target_dict[tk] = type_embd_dict[kk] + record[all_keys.index("repinit.compress_data.0")] = True + record[all_keys.index("repinit.compress_info.0")] = True + assert all(record) + return target_dict diff --git a/source/tests/pd/model/test_dpa2.py b/source/tests/pd/model/test_dpa2.py new file mode 100644 index 0000000000..f441007cad --- /dev/null +++ b/source/tests/pd/model/test_dpa2.py @@ -0,0 +1,333 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor.dpa2 import DescrptDPA2 as DPDescrptDPA2 +from deepmd.dpmodel.descriptor.dpa2 import ( + RepformerArgs, + RepinitArgs, +) +from deepmd.pd.model.descriptor.dpa2 import ( + DescrptDPA2, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDescrptDPA2(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self) -> None: + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ) -> None: + rng = np.random.default_rng(100) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + davg_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd = 0.1 + np.abs(dstd) + dstd_2 = 0.1 + np.abs(dstd_2) + + for ( + riti, + riz, + rp1c, + rp1d, + rp1g, + rp1a, + rp2g, + rp2a, + rph, + rp2gate, + rus, + rpz, + sm, + prec, + ect, + ns, + ) in itertools.product( + ["concat", "strip"], # repinit_tebd_input_mode + [ + True, + ], # repinit_set_davg_zero + [True, False], # repformer_update_g1_has_conv + [True, False], # repformer_update_g1_has_drrd + [True, False], # repformer_update_g1_has_grrg + [ + False, + 
], # repformer_update_g1_has_attn + [ + False, + ], # repformer_update_g2_has_g1g1 + [True, False], # repformer_update_g2_has_attn + [ + False, + ], # repformer_update_h2 + [ + True, + ], # repformer_attn2_has_gate + ["res_avg", "res_residual"], # repformer_update_style + [ + True, + ], # repformer_set_davg_zero + [ + True, + ], # smooth + ["float64"], # precision + [False, True], # use_econf_tebd + [ + False, + True, + ], # new sub-structures (use_sqrt_nnei, g1_out_conv, g1_out_mlp) + ): + if ns and not rp1d and not rp1g: + continue + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + if prec == "float64": + atol = 1e-8 # marginal GPU test cases... + + repinit = RepinitArgs( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + nsel=self.sel_mix, + tebd_input_mode=riti, + set_davg_zero=riz, + ) + repformer = RepformerArgs( + rcut=self.rcut / 2, + rcut_smth=self.rcut_smth, + nsel=nnei // 2, + nlayers=3, + g1_dim=20, + g2_dim=10, + axis_neuron=4, + update_g1_has_conv=rp1c, + update_g1_has_drrd=rp1d, + update_g1_has_grrg=rp1g, + update_g1_has_attn=rp1a, + update_g2_has_g1g1=rp2g, + update_g2_has_attn=rp2a, + update_h2=rph, + attn1_hidden=20, + attn1_nhead=2, + attn2_hidden=10, + attn2_nhead=2, + attn2_has_gate=rp2gate, + update_style=rus, + set_davg_zero=rpz, + use_sqrt_nnei=ns, + g1_out_conv=ns, + g1_out_mlp=ns, + ) + + # dpa2 new impl + dd0 = DescrptDPA2( + self.nt, + repinit=repinit, + repformer=repformer, + # kwargs for descriptor + smooth=sm, + exclude_types=[], + add_tebd_to_repinit_out=False, + precision=prec, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + + dd0.repinit.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.repinit.stddev = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.mean = paddle.to_tensor(davg_2, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.stddev = paddle.to_tensor(dstd_2, dtype=dtype).to( + device=env.DEVICE + ) + 
rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.mapping, dtype="int64").to(device=env.DEVICE), + ) + # serialization + dd1 = DescrptDPA2.deserialize(dd0.serialize()) + rd1, _, _, _, _ = dd1( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.mapping, dtype="int64").to(device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + ) + # dp impl + dd2 = DPDescrptDPA2.deserialize(dd0.serialize()) + rd2, _, _, _, _ = dd2.call( + self.coord_ext, self.atype_ext, self.nlist, self.mapping + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd2, + rtol=rtol, + atol=atol, + ) + + @unittest.skip("skip jit in paddle temporally") + def test_jit( + self, + ) -> None: + rng = np.random.default_rng(100) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + davg_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd = 0.1 + np.abs(dstd) + + for ( + riti, + riz, + rp1c, + rp1d, + rp1g, + rp1a, + rp2g, + rp2a, + rph, + rp2gate, + rus, + rpz, + sm, + prec, + ect, + ns, + ) in itertools.product( + ["concat", "strip"], # repinit_tebd_input_mode + [ + True, + ], # repinit_set_davg_zero + [ + True, + ], # repformer_update_g1_has_conv + [ + True, + ], # repformer_update_g1_has_drrd + [ + True, + ], # repformer_update_g1_has_grrg + [ + True, + ], # repformer_update_g1_has_attn + [ + True, + ], # repformer_update_g2_has_g1g1 + [ + True, + ], # repformer_update_g2_has_attn + [ + False, + ], 
# repformer_update_h2 + [ + True, + ], # repformer_attn2_has_gate + ["res_avg", "res_residual"], # repformer_update_style + [ + True, + ], # repformer_set_davg_zero + [ + True, + ], # smooth + ["float64"], # precision + [False, True], # use_econf_tebd + [True], # new sub-structures (use_sqrt_nnei, g1_out_conv, g1_out_mlp) + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + + repinit = RepinitArgs( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + nsel=self.sel_mix, + tebd_input_mode=riti, + set_davg_zero=riz, + ) + repformer = RepformerArgs( + rcut=self.rcut / 2, + rcut_smth=self.rcut_smth, + nsel=nnei // 2, + nlayers=3, + g1_dim=20, + g2_dim=10, + axis_neuron=4, + update_g1_has_conv=rp1c, + update_g1_has_drrd=rp1d, + update_g1_has_grrg=rp1g, + update_g1_has_attn=rp1a, + update_g2_has_g1g1=rp2g, + update_g2_has_attn=rp2a, + update_h2=rph, + attn1_hidden=20, + attn1_nhead=2, + attn2_hidden=10, + attn2_nhead=2, + attn2_has_gate=rp2gate, + update_style=rus, + set_davg_zero=rpz, + use_sqrt_nnei=ns, + g1_out_conv=ns, + g1_out_mlp=ns, + ) + + # dpa2 new impl + dd0 = DescrptDPA2( + self.nt, + repinit=repinit, + repformer=repformer, + # kwargs for descriptor + smooth=sm, + exclude_types=[], + add_tebd_to_repinit_out=False, + precision=prec, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + + dd0.repinit.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.repinit.stddev = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.mean = paddle.to_tensor(davg_2, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.stddev = paddle.to_tensor(dstd_2, dtype=dtype).to( + device=env.DEVICE + ) + model = paddle.jit.to_static(dd0) diff --git a/source/tests/pd/model/test_forward_lower.py b/source/tests/pd/model/test_forward_lower.py index db6497b605..1d924e2d3d 100644 --- a/source/tests/pd/model/test_forward_lower.py +++ b/source/tests/pd/model/test_forward_lower.py @@ 
-140,22 +140,21 @@ def test( class TestEnergyModelSeA(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_se_e2_a) self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelDPA1(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_dpa1) self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_dpa2) self.model = get_model(model_params).to(env.DEVICE) @@ -163,7 +162,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBL(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_zbl) self.model = get_model(model_params).to(env.DEVICE) @@ -171,7 +170,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinSeA(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_spin) self.test_spin = True @@ -180,7 +179,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinDPA1(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_spin) model_params["descriptor"] = copy.deepcopy(model_dpa1)["descriptor"] @@ -192,7 +191,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinDPA2(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_spin) model_params["descriptor"] = copy.deepcopy(model_dpa2)["descriptor"] diff --git 
a/source/tests/pd/model/test_null_input.py b/source/tests/pd/model/test_null_input.py index 5d67491943..29d2f84eea 100644 --- a/source/tests/pd/model/test_null_input.py +++ b/source/tests/pd/model/test_null_input.py @@ -23,6 +23,7 @@ ) from .test_permutation import ( model_dpa1, + model_dpa2, model_se_e2_a, ) @@ -32,7 +33,7 @@ class NullTest: def test_nloc_1( self, - ): + ) -> None: natoms = 1 generator = paddle.seed(GLOBAL_SEED) # paddle.seed(1000) @@ -60,7 +61,7 @@ def test_nloc_1( def test_nloc_2_far( self, - ): + ) -> None: natoms = 2 generator = paddle.seed(GLOBAL_SEED) cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) @@ -100,3 +101,10 @@ def setUp(self): model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) + + +class TestEnergyModelDPA2(unittest.TestCase, NullTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py index 4543348d3b..297614b45d 100644 --- a/source/tests/pd/model/test_permutation.py +++ b/source/tests/pd/model/test_permutation.py @@ -416,7 +416,6 @@ def setUp(self) -> None: self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) diff --git a/source/tests/pd/model/test_rot.py b/source/tests/pd/model/test_rot.py index 85c90dc60f..84a0d3d724 100644 --- a/source/tests/pd/model/test_rot.py +++ b/source/tests/pd/model/test_rot.py @@ -176,7 +176,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, RotTest): def setUp(self): model_params = copy.deepcopy(model_dpa2) diff --git 
a/source/tests/pd/model/test_rot_denoise.py b/source/tests/pd/model/test_rot_denoise.py index 74d5d41791..4a1841d10b 100644 --- a/source/tests/pd/model/test_rot_denoise.py +++ b/source/tests/pd/model/test_rot_denoise.py @@ -18,8 +18,9 @@ from ..common import ( eval_model, ) -from .test_permutation_denoise import ( # model_dpa2, +from .test_permutation_denoise import ( model_dpa1, + model_dpa2, ) dtype = paddle.float64 @@ -112,6 +113,14 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA2(unittest.TestCase, RotDenoiseTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + # @unittest.skip("hybrid not supported at the moment") # class TestEnergyModelHybrid(unittest.TestCase, TestRotDenoise): # def setUp(self): diff --git a/source/tests/pd/model/test_smooth.py b/source/tests/pd/model/test_smooth.py index cc50043ad8..f907e6f4ee 100644 --- a/source/tests/pd/model/test_smooth.py +++ b/source/tests/pd/model/test_smooth.py @@ -20,6 +20,7 @@ ) from .test_permutation import ( # model_dpau, model_dpa1, + model_dpa2, model_se_e2_a, ) @@ -189,6 +190,36 @@ def setUp(self): self.aprec = 1e-5 +class TestEnergyModelDPA2(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + model_params["descriptor"]["repinit"]["rcut"] = 8 + model_params["descriptor"]["repinit"]["rcut_smth"] = 3.5 + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = 1e-5, 1e-4 + + +class TestEnergyModelDPA2_1(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None + 
+ +class TestEnergyModelDPA2_2(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + # class TestEnergyFoo(unittest.TestCase): # def test(self): # model_params = model_dpau diff --git a/source/tests/pd/model/test_trans.py b/source/tests/pd/model/test_trans.py index 3fae49d598..f050596996 100644 --- a/source/tests/pd/model/test_trans.py +++ b/source/tests/pd/model/test_trans.py @@ -110,7 +110,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, TransTest): def setUp(self): model_params = copy.deepcopy(model_dpa2) diff --git a/source/tests/pd/model/test_unused_params.py b/source/tests/pd/model/test_unused_params.py new file mode 100644 index 0000000000..bf92171da1 --- /dev/null +++ b/source/tests/pd/model/test_unused_params.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( + model_dpa2, +) + +dtype = paddle.float64 + + +@unittest.skip("paddle do not support unpacking grad_fn.next_functions") +class TestUnusedParamsDPA2(unittest.TestCase): + def test_unused(self): + import itertools + + for conv, drrd, grrg, attn1, g1g1, attn2, h2 in itertools.product( + [True], + [True], + [True], + [True], + [True], + [True], + [True], + ): + if (not drrd) and (not grrg) and h2: + # skip the case h2 is not envolved + continue + if (not grrg) and (not conv): + # skip the case g2 is not envolved + continue + model = copy.deepcopy(model_dpa2) + 
model["descriptor"]["repformer"]["nlayers"] = 2 + # model["descriptor"]["combine_grrg"] = cmbg2 + model["descriptor"]["repformer"]["update_g1_has_conv"] = conv + model["descriptor"]["repformer"]["update_g1_has_drrd"] = drrd + model["descriptor"]["repformer"]["update_g1_has_grrg"] = grrg + model["descriptor"]["repformer"]["update_g1_has_attn"] = attn1 + model["descriptor"]["repformer"]["update_g2_has_g1g1"] = g1g1 + model["descriptor"]["repformer"]["update_g2_has_attn"] = attn2 + model["descriptor"]["repformer"]["update_h2"] = h2 + model["fitting_net"]["neuron"] = [12, 12, 12] + self._test_unused(model) + + def _test_unused(self, model_params): + self.model = get_model(model_params).to(env.DEVICE) + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + idx_perm = [1, 0, 4, 3, 2] + result_0 = eval_model(self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype) + test_keys = ["energy", "force", "virial"] + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + + # use computation graph to find all contributing tensors + def get_contributing_params(y, top_level=True): + nf = y.grad_fn.next_functions if top_level else y.next_functions + for f, _ in nf: + try: + yield f.variable + except AttributeError: + pass # node has no tensor + if f is not None: + yield from get_contributing_params(f, top_level=False) + + contributing_parameters = set(get_contributing_params(ret0["energy"])) + all_parameters = set(self.model.parameters()) + non_contributing = all_parameters - contributing_parameters + self.assertEqual(len(non_contributing), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py index 
f82f7a8cd0..769ea6f6d3 100644 --- a/source/tests/pd/test_finetune.py +++ b/source/tests/pd/test_finetune.py @@ -197,7 +197,7 @@ def test_finetune_change_out_bias(self): self.tearDown() - def test_finetune_change_type(self): + def test_finetune_change_type(self) -> None: if not self.mixed_types: # skip when not mixed_types return @@ -284,7 +284,7 @@ def test_finetune_change_type(self): self.tearDown() - def tearDown(self): + def tearDown(self) -> None: for f in os.listdir("."): if f.startswith("model") and f.endswith(".pd"): os.remove(f) @@ -295,7 +295,7 @@ def tearDown(self): class TestEnergyModelSeA(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -311,7 +311,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyZBLModelSeA(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -327,7 +327,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyDOSModelSeA(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "dos/input.json") with open(input_json) as f: self.config = json.load(f) @@ -342,7 +342,7 @@ def setUp(self): class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -356,9 +356,8 @@ def setUp(self): self.testkey = None -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: 
self.config = json.load(f) diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py index 65210d07b3..ec29024810 100644 --- a/source/tests/pd/test_multitask.py +++ b/source/tests/pd/test_multitask.py @@ -29,6 +29,7 @@ ) from .model.test_permutation import ( + model_dpa2, model_se_e2_a, ) @@ -222,5 +223,44 @@ def tearDown(self) -> None: MultiTaskTrainTest.tearDown(self) +class TestMultiTaskDPA2(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_DPA2 = deepcopy(multitask_template) + multitask_DPA2["model"]["shared_dict"]["my_descriptor"] = model_dpa2[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "DPA2" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_DPA2 + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py index c3d65c09df..da6a22dc62 100644 --- a/source/tests/pd/test_training.py +++ b/source/tests/pd/test_training.py @@ -24,6 +24,7 @@ from .model.test_permutation import ( 
model_dpa1, + model_dpa2, model_se_e2_a, ) @@ -149,9 +150,7 @@ def setUp(self) -> None: self.config["model"] = deepcopy(model_se_e2_a) self.config["training"]["numb_steps"] = 1 self.config["training"]["save_freq"] = 1 - # import paddle enable_prim(True) - # assert paddle.framework.core._is_eager_prim_enabled() def tearDown(self) -> None: DPTrainTest.tearDown(self) @@ -195,5 +194,21 @@ def tearDown(self) -> None: DPTrainTest.tearDown(self) +class TestEnergyModelDPA2(unittest.TestCase, DPTrainTest): + def setUp(self) -> None: + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() diff --git a/source/tests/pd/test_update_sel.py b/source/tests/pd/test_update_sel.py index e7b1acf6ff..10342357c6 100644 --- a/source/tests/pd/test_update_sel.py +++ b/source/tests/pd/test_update_sel.py @@ -31,7 +31,7 @@ def setUp(self) -> None: return super().setUp() @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_one_sel(self, sel_mock): + def test_update_one_sel(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] min_nbor_dist, sel = self.update_sel.update_one_sel(None, None, 6, "auto") @@ -45,7 +45,7 @@ def test_update_one_sel(self, sel_mock): @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_hybrid(self, sel_mock): + def test_update_sel_hybrid(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] jdata = { @@ -76,7 
+76,7 @@ def test_update_sel_hybrid(self, sel_mock): self.assertEqual(jdata, expected_out) @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel(self, sel_mock): + def test_update_sel(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] jdata = { @@ -90,9 +90,8 @@ def test_update_sel(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_atten_auto(self, sel_mock): + def test_update_sel_atten_auto(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [25] jdata = { @@ -118,9 +117,8 @@ def test_update_sel_atten_auto(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_atten_int(self, sel_mock): + def test_update_sel_atten_int(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [25] jdata = { @@ -146,9 +144,8 @@ def test_update_sel_atten_int(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_atten_list(self, sel_mock): + def test_update_sel_atten_list(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [25] jdata = { @@ -174,7 +171,50 @@ def test_update_sel_atten_list(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - def test_skip_frozen(self): + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_dpa2_auto(self, sel_mock) -> None: + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "nsel": "auto", + 
"three_body_rcut": 4.0, + "three_body_sel": "auto", + }, + "repformer": { + "rcut": 4.0, + "nsel": "auto", + }, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "nsel": 28, + "three_body_rcut": 4.0, + "three_body_sel": 28, + }, + "repformer": { + "rcut": 4.0, + "nsel": 28, + }, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + def test_skip_frozen(self) -> None: jdata = { "model": { "type": "frozen", @@ -185,7 +225,7 @@ def test_skip_frozen(self): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - def test_wrap_up_4(self): + def test_wrap_up_4(self) -> None: self.assertEqual(self.update_sel.wrap_up_4(12), 3 * 4) self.assertEqual(self.update_sel.wrap_up_4(13), 4 * 4) self.assertEqual(self.update_sel.wrap_up_4(14), 4 * 4) From 79ed0f0864ce036bb8808b3f25cfdadaeda85adf Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 2 Dec 2024 00:36:05 +0800 Subject: [PATCH 53/58] fix typos --- deepmd/pd/model/descriptor/repformer_layer.py | 9 ++++++--- deepmd/pd/model/descriptor/se_t_tebd.py | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/deepmd/pd/model/descriptor/repformer_layer.py b/deepmd/pd/model/descriptor/repformer_layer.py index 1243437298..a09c5cbe17 100644 --- a/deepmd/pd/model/descriptor/repformer_layer.py +++ b/deepmd/pd/model/descriptor/repformer_layer.py @@ -969,12 +969,15 @@ def _cal_hg( g2 = _apply_nlist_mask(g2, nlist_mask) if not smooth: # nb x nloc - # must use type_as here to convert bool to float, otherwise there will be numerical difference from numpy + # must use astype here to convert bool to float, otherwise there will be numerical difference from numpy if not use_sqrt_nnei: - invnnei = 1.0 / (epsilon + paddle.sum(nlist_mask.type_as(g2), axis=-1)) + invnnei = 1.0 / ( + epsilon + paddle.sum(nlist_mask.astype(g2.dtype), 
axis=-1) + ) else: invnnei = 1.0 / ( - epsilon + paddle.sqrt(paddle.sum(nlist_mask.type_as(g2), axis=-1)) + epsilon + + paddle.sqrt(paddle.sum(nlist_mask.astype(g2.dtype), axis=-1)) ) # nb x nloc x 1 x 1 invnnei = invnnei.unsqueeze(-1).unsqueeze(-1) diff --git a/deepmd/pd/model/descriptor/se_t_tebd.py b/deepmd/pd/model/descriptor/se_t_tebd.py index 31cf352d33..a8b9a6a417 100644 --- a/deepmd/pd/model/descriptor/se_t_tebd.py +++ b/deepmd/pd/model/descriptor/se_t_tebd.py @@ -888,12 +888,12 @@ def forward( # (ntypes * ntypes) * (nt+nt) two_side_type_embedding = paddle.concat( [type_embedding_i, type_embedding_j], -1 - ).reshape(-1, nt * 2) + ).reshape([-1, nt * 2]) tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) # (nfnl x nt_i x nt_j) x ng gg_t = paddle.take_along_axis(tt_full, indices=idx, axis=0) # (nfnl x nt_i x nt_j) x ng - gg_t = gg_t.reshape(nfnl, nnei, nnei, ng) + gg_t = gg_t.reshape([nfnl, nnei, nnei, ng]) if self.smooth: gg_t = ( gg_t From ed80c6d0025c8bb5e3687fe8fe3ac3df4133e2e2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 02:40:40 +0000 Subject: [PATCH 54/58] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pd/model/descriptor/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 8935371c01..8af9582527 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -26,11 +26,11 @@ __all__ = [ "BaseDescriptor", "DescriptorBlock", + "DescrptBlockRepformers", "DescrptBlockSeA", "DescrptBlockSeAtten", "DescrptDPA1", "DescrptDPA2", "DescrptSeA", "prod_env_mat", - "DescrptBlockRepformers", ] From 89d82f9519f86850fc212fc52eab7cd05f590271 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 2 Dec 2024 14:19:27 +0800 Subject: 
[PATCH 55/58] update UT code in test_se_t_tebd --- .../consistent/descriptor/test_se_t_tebd.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/source/tests/consistent/descriptor/test_se_t_tebd.py b/source/tests/consistent/descriptor/test_se_t_tebd.py index bb4a5db6e7..9cdca9bde3 100644 --- a/source/tests/consistent/descriptor/test_se_t_tebd.py +++ b/source/tests/consistent/descriptor/test_se_t_tebd.py @@ -17,6 +17,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, CommonTest, parameterized, @@ -34,6 +35,10 @@ from deepmd.jax.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdJAX else: DescrptSeTTebdJAX = None +if INSTALLED_PD: + from deepmd.pd.model.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdPD +else: + DescrptSeTTebdPD = None if INSTALLED_ARRAY_API_STRICT: from ...array_api_strict.descriptor.se_t_tebd import ( DescrptSeTTebd as DescrptSeTTebdStrict, @@ -146,12 +151,14 @@ def skip_tf(self) -> bool: ) = self.param return True + skip_pd = not INSTALLED_PD skip_jax = not INSTALLED_JAX skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT tf_class = DescrptSeTTebdTF dp_class = DescrptSeTTebdDP pt_class = DescrptSeTTebdPT + pd_class = DescrptSeTTebdPD jax_class = DescrptSeTTebdJAX array_api_strict_class = DescrptSeTTebdStrict args = descrpt_se_e3_tebd_args().append(Argument("ntypes", int, optional=False)) @@ -243,6 +250,16 @@ def eval_jax(self, jax_obj: Any) -> Any: mixed_types=True, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: return self.eval_array_api_strict_descriptor( array_api_strict_obj, From 617a2584323cee7f957c270c197688d60b74ad4c Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 2 Dec 2024 14:23:29 +0800 Subject: [PATCH 56/58] update __init__ --- 
deepmd/pd/model/descriptor/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 8af9582527..cee9dbf226 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -22,6 +22,10 @@ DescrptBlockSeA, DescrptSeA, ) +from .se_t_tebd import ( + DescrptBlockSeTTebd, + DescrptSeTTebd, +) __all__ = [ "BaseDescriptor", @@ -29,8 +33,10 @@ "DescrptBlockRepformers", "DescrptBlockSeA", "DescrptBlockSeAtten", + "DescrptBlockSeTTebd", "DescrptDPA1", "DescrptDPA2", "DescrptSeA", + "DescrptSeTTebd", "prod_env_mat", ] From 6e5ebb373d0e3c94f36d439508f198e537a1d100 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 3 Dec 2024 11:45:52 +0800 Subject: [PATCH 57/58] solve CodeQL warning --- source/tests/pd/model/test_descriptor_dpa2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/tests/pd/model/test_descriptor_dpa2.py b/source/tests/pd/model/test_descriptor_dpa2.py index e42585a433..12017bb840 100644 --- a/source/tests/pd/model/test_descriptor_dpa2.py +++ b/source/tests/pd/model/test_descriptor_dpa2.py @@ -123,7 +123,8 @@ def test_descriptor(self) -> None: ntypes = len(model_dpa2["type_map"]) dparams = model_dpa2["descriptor"] dparams["ntypes"] = ntypes - assert dparams.pop("type") == "dpa2" + assert dparams["type"] == "dpa2" + dparams.pop("type") dparams["concat_output_tebd"] = False dparams["use_tebd_bias"] = True des = DescrptDPA2( From 575726afde7a74b512650909ea9db249f202208a Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 19 Dec 2024 15:32:19 +0800 Subject: [PATCH 58/58] fix unittest and typo --- deepmd/pd/model/task/fitting.py | 4 +- deepmd/pd/utils/multi_task.py | 4 +- source/tests/pd/model/water/multitask.json | 3 +- .../pd/model/water/multitask_sharefit.json | 8 +- source/tests/pd/test_multitask.py | 114 ++++++++++++++++++ 5 files changed, 125 insertions(+), 8 
deletions(-) diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index d9db44aff5..6e96b7b081 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -211,8 +211,8 @@ def __init__( if self.dim_case_embd > 0: self.register_buffer( "case_embd", - paddle.zeros(self.dim_case_embd, dtype=self.prec, place=device), - # paddle.eye(self.dim_case_embd, dtype=self.prec, place=device)[0], + paddle.zeros(self.dim_case_embd, dtype=self.prec).to(device=device), + # paddle.eye(self.dim_case_embd, dtype=self.prec).to(device=device)[0], ) else: self.case_embd = None diff --git a/deepmd/pd/utils/multi_task.py b/deepmd/pd/utils/multi_task.py index 680dc53c79..321883c12e 100644 --- a/deepmd/pd/utils/multi_task.py +++ b/deepmd/pd/utils/multi_task.py @@ -96,7 +96,9 @@ def preprocess_shared_params(model_config): shared_links = {} type_map_keys = [] - def replace_one_item(params_dict, key_type, key_in_dict, suffix="", index=None): + def replace_one_item( + params_dict, key_type, key_in_dict, suffix="", index=None + ) -> None: shared_type = key_type shared_key = key_in_dict shared_level = 0 diff --git a/source/tests/pd/model/water/multitask.json b/source/tests/pd/model/water/multitask.json index 83524a8b77..2786afca59 100644 --- a/source/tests/pd/model/water/multitask.json +++ b/source/tests/pd/model/water/multitask.json @@ -10,7 +10,8 @@ "type": "se_e2_a", "sel": [ 46, - 92 + 92, + 4 ], "rcut_smth": 0.50, "rcut": 6.00, diff --git a/source/tests/pd/model/water/multitask_sharefit.json b/source/tests/pd/model/water/multitask_sharefit.json index 246b5992f7..934ef04998 100644 --- a/source/tests/pd/model/water/multitask_sharefit.json +++ b/source/tests/pd/model/water/multitask_sharefit.json @@ -91,14 +91,14 @@ "stat_file": "./stat_files/model_1.hdf5", "training_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" }, "validation_data": { "systems": [ - "pt/water/data/data_0" + 
"pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" @@ -108,14 +108,14 @@ "stat_file": "./stat_files/model_2.hdf5", "training_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" }, "validation_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py index f1ffc94a45..72ad251068 100644 --- a/source/tests/pd/test_multitask.py +++ b/source/tests/pd/test_multitask.py @@ -31,6 +31,7 @@ from .model.test_permutation import ( model_dpa1, model_dpa2, + model_dpa2tebd, model_se_e2_a, ) @@ -41,6 +42,13 @@ def setUpModule() -> None: with open(multitask_template_json) as f: multitask_template = json.load(f) + global multitask_sharefit_template + multitask_sharefit_template_json = str( + Path(__file__).parent / "water/multitask_sharefit.json" + ) + with open(multitask_sharefit_template_json) as f: + multitask_sharefit_template = json.load(f) + class MultiTaskTrainTest: def test_multitask_train(self) -> None: @@ -228,6 +236,46 @@ def tearDown(self) -> None: MultiTaskTrainTest.tearDown(self) +class TestMultiTaskSeASharefit(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_se_e2_a = deepcopy(multitask_sharefit_template) + multitask_se_e2_a["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "se_e2_a_share_fit" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_se_e2_a + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + 
self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + self.share_fitting = True + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + class TestMultiTaskDPA1(unittest.TestCase, MultiTaskTrainTest): def setUp(self) -> None: multitask_DPA1 = deepcopy(multitask_template) @@ -277,6 +325,72 @@ def setUp(self) -> None: self.stat_files = "DPA2" os.makedirs(self.stat_files, exist_ok=True) self.config = multitask_DPA2 + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + +class TestMultiTaskDPA2Tebd(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_DPA2 = deepcopy(multitask_template) + 
multitask_DPA2["model"]["shared_dict"]["my_descriptor"] = model_dpa2tebd[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "DPA2Tebd" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_DPA2 + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) if __name__ == "__main__":