From 94752a56151c1d5b4c7748b61b29b65156e9d97a Mon Sep 17 00:00:00 2001
From: Jinzhe Zeng
Date: Sun, 31 Mar 2024 00:36:34 -0400
Subject: [PATCH] feat: consistent type embedding (#3617)

Signed-off-by: Jinzhe Zeng
---
 deepmd/dpmodel/utils/type_embed.py | 124 +++++++++++++++
 deepmd/pt/model/network/network.py | 141 +++++++++++++++++-
 deepmd/tf/env.py | 11 +-
 deepmd/tf/model/model.py | 2 +
 deepmd/tf/model/multi.py | 4 +-
 deepmd/tf/model/pairwise_dprc.py | 3 +-
 deepmd/tf/utils/graph.py | 6 +-
 deepmd/tf/utils/type_embed.py | 112 +++++++++++++-
 .../tests/consistent/test_type_embedding.py | 132 ++++++++++++++++
 source/tests/pt/model/models/dpa2_tebd.pth | Bin 1085 -> 1588 bytes
 source/tests/pt/model/test_descriptor_dpa1.py | 12 +-
 source/tests/pt/model/test_descriptor_dpa2.py | 12 +-
 source/tests/tf/test_data_large_batch.py | 3 +
 source/tests/tf/test_descrpt_hybrid.py | 1 +
 source/tests/tf/test_descrpt_se_a_type.py | 2 +
 source/tests/tf/test_descrpt_se_atten.py | 4 +
 source/tests/tf/test_dipole_se_a_tebd.py | 1 +
 source/tests/tf/test_model_se_a.py | 5 +-
 source/tests/tf/test_model_se_a_ebd_v2.py | 1 +
 source/tests/tf/test_model_se_a_type.py | 1 +
 source/tests/tf/test_model_se_atten.py | 7 +
 source/tests/tf/test_polar_se_a_tebd.py | 1 +
 source/tests/tf/test_type_embed.py | 2 +-
 23 files changed, 558 insertions(+), 29 deletions(-)
 create mode 100644 deepmd/dpmodel/utils/type_embed.py
 create mode 100644 source/tests/consistent/test_type_embedding.py

diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py
new file mode 100644
index 0000000000..7527c122f3
--- /dev/null
+++ b/deepmd/dpmodel/utils/type_embed.py
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    List,
+    Optional,
+)
+
+import numpy as np
+
+from deepmd.dpmodel.common import (
+    PRECISION_DICT,
+    NativeOP,
+)
+from deepmd.dpmodel.utils.network import (
+    EmbeddingNet,
+)
+from deepmd.utils.version import (
+    check_version_compatibility,
+)
+
+
+class TypeEmbedNet(NativeOP):
+    r"""Type embedding network.
+
+    Parameters
+    ----------
+    ntypes : int
+        Number of atom types
+    neuron : list[int]
+        Number of neurons in each hidden layer of the embedding net
+    resnet_dt
+        Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b)
+    activation_function
+        The activation function in the embedding net. Supported options are |ACTIVATION_FN|
+    precision
+        The precision of the embedding net parameters. Supported options are |PRECISION|
+    trainable
+        Whether the weights of the embedding net are trainable.
+    seed
+        Random seed for initializing the network parameters.
+    padding
+        Concatenate the zero padding to the output, as the default embedding of the empty type.
+    """
+
+    def __init__(
+        self,
+        *,
+        ntypes: int,
+        neuron: List[int],
+        resnet_dt: bool = False,
+        activation_function: str = "tanh",
+        precision: str = "default",
+        trainable: bool = True,
+        seed: Optional[int] = None,
+        padding: bool = False,
+    ) -> None:
+        self.ntypes = ntypes
+        self.neuron = neuron
+        self.seed = seed
+        self.resnet_dt = resnet_dt
+        self.precision = precision
+        self.activation_function = str(activation_function)
+        self.trainable = trainable
+        self.padding = padding
+        self.embedding_net = EmbeddingNet(
+            ntypes,
+            self.neuron,
+            self.activation_function,
+            self.resnet_dt,
+            self.precision,
+        )
+
+    def call(self) -> np.ndarray:
+        """Compute the type embedding network."""
+        embed = self.embedding_net(
+            np.eye(self.ntypes, dtype=PRECISION_DICT[self.precision])
+        )
+        if self.padding:
+            embed = np.pad(embed, ((0, 1), (0, 0)), mode="constant")
+        return embed
+
+    @classmethod
+    def deserialize(cls, data: dict):
+        """Deserialize the model.
+
+        Parameters
+        ----------
+        data : dict
+            The serialized data
+
+        Returns
+        -------
+        TypeEmbedNet
+            The deserialized model
+        """
+        data = data.copy()
+        check_version_compatibility(data.pop("@version", 1), 1, 1)
+        data_cls = data.pop("@class")
+        assert data_cls == "TypeEmbedNet", f"Invalid class {data_cls}"
+
+        embedding_net = EmbeddingNet.deserialize(data.pop("embedding"))
+        type_embedding_net = cls(**data)
+        type_embedding_net.embedding_net = embedding_net
+        return type_embedding_net
+
+    def serialize(self) -> dict:
+        """Serialize the model.
+
+        Returns
+        -------
+        dict
+            The serialized data
+        """
+        return {
+            "@class": "TypeEmbedNet",
+            "@version": 1,
+            "ntypes": self.ntypes,
+            "neuron": self.neuron,
+            "resnet_dt": self.resnet_dt,
+            "precision": self.precision,
+            "activation_function": self.activation_function,
+            "trainable": self.trainable,
+            "padding": self.padding,
+            "embedding": self.embedding_net.serialize(),
+        }
diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py
index 60d5251994..c895f642e1 100644
--- a/deepmd/pt/model/network/network.py
+++ b/deepmd/pt/model/network/network.py
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 from typing import (
+    List,
     Optional,
 )
 
@@ -8,9 +9,15 @@
 import torch.nn as nn
 import torch.nn.functional as F
 
+from deepmd.pt.model.network.mlp import (
+    EmbeddingNet,
+)
 from deepmd.pt.utils import (
     env,
 )
+from deepmd.utils.version import (
+    check_version_compatibility,
+)
 
 try:
     from typing import (
@@ -552,12 +559,12 @@ class TypeEmbedNet(nn.Module):
     def __init__(self, type_nums, embed_dim, bavg=0.0, stddev=1.0):
         """Construct a type embedding net."""
         super().__init__()
-        self.embedding = nn.Embedding(
-            type_nums + 1,
-            embed_dim,
-            padding_idx=type_nums,
-            dtype=env.GLOBAL_PT_FLOAT_PRECISION,
-            device=env.DEVICE,
+        self.embedding = TypeEmbedNetConsistent(
+            ntypes=type_nums,
+            neuron=[embed_dim],
+            padding=True,
+            activation_function="Linear",
+            precision="default",
         )
         # nn.init.normal_(self.embedding.weight[:-1], mean=bavg, std=stddev)
 
@@ -571,7 +578,7 @@ def forward(self, atype):
 
             type_embedding:
 
         """
-        return self.embedding(atype)
+        return self.embedding(atype.device)[atype]
 
     def share_params(self, base_class, shared_level, resume=False):
         """
@@ -590,6 +597,126 @@ def share_params(self, base_class, shared_level, resume=False):
             raise NotImplementedError
 
 
+class TypeEmbedNetConsistent(nn.Module):
+    r"""Type embedding network that is consistent with other backends.
+
+    Parameters
+    ----------
+    ntypes : int
+        Number of atom types
+    neuron : list[int]
+        Number of neurons in each hidden layer of the embedding net
+    resnet_dt
+        Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b)
+    activation_function
+        The activation function in the embedding net. Supported options are |ACTIVATION_FN|
+    precision
+        The precision of the embedding net parameters. Supported options are |PRECISION|
+    trainable
+        Whether the weights of the embedding net are trainable.
+    seed
+        Random seed for initializing the network parameters.
+    padding
+        Concatenate the zero padding to the output, as the default embedding of the empty type.
+    """
+
+    def __init__(
+        self,
+        *,
+        ntypes: int,
+        neuron: List[int],
+        resnet_dt: bool = False,
+        activation_function: str = "tanh",
+        precision: str = "default",
+        trainable: bool = True,
+        seed: Optional[int] = None,
+        padding: bool = False,
+    ):
+        """Construct a type embedding net."""
+        super().__init__()
+        self.ntypes = ntypes
+        self.neuron = neuron
+        self.seed = seed
+        self.resnet_dt = resnet_dt
+        self.precision = precision
+        self.prec = env.PRECISION_DICT[self.precision]
+        self.activation_function = str(activation_function)
+        self.trainable = trainable
+        self.padding = padding
+        # NOTE: there is currently no way to pass the seed to EmbeddingNet
+        self.embedding_net = EmbeddingNet(
+            ntypes,
+            self.neuron,
+            self.activation_function,
+            self.resnet_dt,
+            self.precision,
+        )
+        for param in self.parameters():
+            param.requires_grad = trainable
+
+    def forward(self, device: torch.device):
+        """Calculate the type embedding network.
+
+        Returns
+        -------
+        type_embedding: torch.Tensor
+            Type embedding network.
+        """
+        embed = self.embedding_net(
+            torch.eye(self.ntypes, dtype=self.prec, device=device)
+        )
+        if self.padding:
+            embed = torch.cat(
+                [embed, torch.zeros(1, embed.shape[1], dtype=self.prec, device=device)]
+            )
+        return embed
+
+    @classmethod
+    def deserialize(cls, data: dict):
+        """Deserialize the model.
+
+        Parameters
+        ----------
+        data : dict
+            The serialized data
+
+        Returns
+        -------
+        TypeEmbedNetConsistent
+            The deserialized model
+        """
+        data = data.copy()
+        check_version_compatibility(data.pop("@version", 1), 1, 1)
+        data_cls = data.pop("@class")
+        assert data_cls == "TypeEmbedNet", f"Invalid class {data_cls}"
+
+        embedding_net = EmbeddingNet.deserialize(data.pop("embedding"))
+        type_embedding_net = cls(**data)
+        type_embedding_net.embedding_net = embedding_net
+        return type_embedding_net
+
+    def serialize(self) -> dict:
+        """Serialize the model.
+
+        Returns
+        -------
+        dict
+            The serialized data
+        """
+        return {
+            "@class": "TypeEmbedNet",
+            "@version": 1,
+            "ntypes": self.ntypes,
+            "neuron": self.neuron,
+            "resnet_dt": self.resnet_dt,
+            "precision": self.precision,
+            "activation_function": self.activation_function,
+            "trainable": self.trainable,
+            "padding": self.padding,
+            "embedding": self.embedding_net.serialize(),
+        }
+
+
 @torch.jit.script
 def gaussian(x, mean, std: float):
     pi = 3.14159
diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py
index 8cc1cacad1..c7873b951c 100644
--- a/deepmd/tf/env.py
+++ b/deepmd/tf/env.py
@@ -168,11 +168,14 @@ def dlopen_library(module: str, filename: str):
     r"share_.+/idt|"
 )[:-1]
 
+# subpatterns:
+# \1: weight name
+# \2: layer index
 TYPE_EMBEDDING_PATTERN = str(
-    r"type_embed_net+/matrix_\d+|"
-    r"type_embed_net+/bias_\d+|"
-    r"type_embed_net+/idt_\d+|"
-)
+    r"type_embed_net/(matrix)_(\d+)|"
+    r"type_embed_net/(bias)_(\d+)|"
+    r"type_embed_net/(idt)_(\d+)|"
+)[:-1]
 
 ATTENTION_LAYER_PATTERN = str(
     r"attention_layer_\d+/c_query/matrix|"
diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py
index 0b419e755e..76bcc6072b 100644
--- a/deepmd/tf/model/model.py
+++ b/deepmd/tf/model/model.py
@@ -678,6 +678,7 @@ def __init__(
             self.typeebd = type_embedding
         elif type_embedding is not None:
             self.typeebd = TypeEmbedNet(
+                ntypes=self.ntypes,
                 **type_embedding,
                 padding=self.descrpt.explicit_ntypes,
             )
@@ -686,6 +687,7 @@
             default_args_dict = {i.name: i.default for i in default_args}
             default_args_dict["activation_function"] = None
             self.typeebd = TypeEmbedNet(
+                ntypes=self.ntypes,
                 **default_args_dict,
                 padding=True,
             )
diff --git a/deepmd/tf/model/multi.py b/deepmd/tf/model/multi.py
index 8fd4b539f1..e49ad47ee3 100644
--- a/deepmd/tf/model/multi.py
+++ b/deepmd/tf/model/multi.py
@@ -146,11 +146,13 @@ def __init__(
             dim_descrpt=self.descrpt.get_dim_out(),
         )
 
+        self.ntypes = self.descrpt.get_ntypes()
         # type embedding
         if type_embedding is not None and isinstance(type_embedding, TypeEmbedNet):
             self.typeebd = type_embedding
         elif type_embedding is not None:
             self.typeebd = TypeEmbedNet(
+                ntypes=self.ntypes,
                 **type_embedding,
                 padding=self.descrpt.explicit_ntypes,
             )
@@ -159,6 +161,7 @@
             default_args_dict = {i.name: i.default for i in default_args}
             default_args_dict["activation_function"] = None
             self.typeebd = TypeEmbedNet(
+                ntypes=self.ntypes,
                 **default_args_dict,
                 padding=True,
             )
@@ -167,7 +170,6 @@
 
         # descriptor
         self.rcut = self.descrpt.get_rcut()
-        self.ntypes = self.descrpt.get_ntypes()
         # fitting
         self.fitting_dict = fitting_dict
         self.numb_fparam_dict = {
diff --git a/deepmd/tf/model/pairwise_dprc.py b/deepmd/tf/model/pairwise_dprc.py
index a67696ba97..92e943d486 100644
--- a/deepmd/tf/model/pairwise_dprc.py
+++ b/deepmd/tf/model/pairwise_dprc.py
@@ -77,11 +77,13 @@ def __init__(
             compress=compress,
             **kwargs,
         )
+        self.ntypes = len(type_map)
         # type embedding
         if isinstance(type_embedding, TypeEmbedNet):
             self.typeebd = type_embedding
         else:
             self.typeebd = TypeEmbedNet(
+                ntypes=self.ntypes,
                 **type_embedding,
                 # must use se_atten, so it must be True
                 padding=True,
@@ -100,7 +102,6 @@
             compress=compress,
         )
         add_data_requirement("aparam", 1, atomic=True, must=True, high_prec=False)
-        self.ntypes = len(type_map)
         self.rcut = max(self.qm_model.get_rcut(), self.qmmm_model.get_rcut())
 
     def build(
diff --git a/deepmd/tf/utils/graph.py b/deepmd/tf/utils/graph.py
index 65f4a743f5..a6e2ab7422 100644
--- a/deepmd/tf/utils/graph.py
+++ b/deepmd/tf/utils/graph.py
@@ -400,9 +400,9 @@ def get_type_embedding_net_nodes_from_graph_def(
     """
     if suffix != "":
         type_embedding_net_pattern = (
-            TYPE_EMBEDDING_PATTERN.replace("/idt", suffix + "/idt")
-            .replace("/bias", suffix + "/bias")
-            .replace("/matrix", suffix + "/matrix")
+            TYPE_EMBEDDING_PATTERN.replace("/(idt)", suffix + "/(idt)")
+            .replace("/(bias)", suffix + "/(bias)")
+            .replace("/(matrix)", suffix + "/(matrix)")
         )
     else:
         type_embedding_net_pattern = TYPE_EMBEDDING_PATTERN
diff --git a/deepmd/tf/utils/type_embed.py b/deepmd/tf/utils/type_embed.py
index 1cd20814d7..0f566027c1 100644
--- a/deepmd/tf/utils/type_embed.py
+++ b/deepmd/tf/utils/type_embed.py
@@ -1,15 +1,20 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
+import re
 from typing import (
     List,
     Optional,
     Union,
 )
 
+from deepmd.dpmodel.utils.network import (
+    EmbeddingNet,
+)
 from deepmd.tf.common import (
     get_activation_func,
     get_precision,
 )
 from deepmd.tf.env import (
+    TYPE_EMBEDDING_PATTERN,
     tf,
 )
 from deepmd.tf.nvnmd.utils.config import (
@@ -21,6 +26,9 @@
 from deepmd.tf.utils.network import (
     embedding_net,
 )
+from deepmd.utils.version import (
+    check_version_compatibility,
+)
 
 
 def embed_atom_type(
@@ -68,6 +76,8 @@ class TypeEmbedNet:
 
     Parameters
     ----------
+    ntypes : int
+        Number of atom types
     neuron : list[int]
         Number of neurons in each hidden layers of the embedding net
     resnet_dt
@@ -89,7 +99,9 @@
 
     def __init__(
         self,
-        neuron: List[int] = [],
+        *,
+        ntypes: int,
+        neuron: List[int],
         resnet_dt: bool = False,
         activation_function: Union[str, None] = "tanh",
         precision: str = "default",
@@ -100,10 +112,12 @@ def __init__(
         **kwargs,
     ) -> None:
         """Constructor."""
+        self.ntypes = ntypes
         self.neuron = neuron
         self.seed = seed
         self.filter_resnet_dt = resnet_dt
         self.filter_precision = get_precision(precision)
+        self.filter_activation_fn_name = str(activation_function)
         self.filter_activation_fn = get_activation_func(activation_function)
         self.trainable = trainable
         self.uniform_seed = uniform_seed
@@ -133,6 +147,7 @@ def build(
             embedded_types
                 The computational graph for embedded types
         """
+        assert ntypes == self.ntypes
         types = tf.convert_to_tensor(list(range(ntypes)), dtype=tf.int32)
         ebd_type = tf.cast(
             tf.one_hot(tf.cast(types, dtype=tf.int32), int(ntypes)),
@@ -189,3 +204,98 @@ def init_variables(
         self.type_embedding_net_variables = (
             get_type_embedding_net_variables_from_graph_def(graph_def, suffix=suffix)
         )
+
+    @classmethod
+    def deserialize(cls, data: dict, suffix: str = ""):
+        """Deserialize the model.
+
+        Parameters
+        ----------
+        data : dict
+            The serialized data
+        suffix : str, optional
+            The suffix of the scope
+
+        Returns
+        -------
+        TypeEmbedNet
+            The deserialized model
+        """
+        data = data.copy()
+        check_version_compatibility(data.pop("@version", 1), 1, 1)
+        data_cls = data.pop("@class")
+        assert data_cls == "TypeEmbedNet", f"Invalid class {data_cls}"
+
+        embedding_net = EmbeddingNet.deserialize(data.pop("embedding"))
+        embedding_net_variables = {}
+        for layer_idx, layer in enumerate(embedding_net.layers):
+            embedding_net_variables[
+                f"type_embed_net{suffix}/matrix_{layer_idx + 1}"
+            ] = layer.w
+            embedding_net_variables[f"type_embed_net{suffix}/bias_{layer_idx + 1}"] = (
+                layer.b
+            )
+            if layer.idt is not None:
+                embedding_net_variables[
+                    f"type_embed_net{suffix}/idt_{layer_idx + 1}"
+                ] = layer.idt.reshape(1, -1)
+            else:
+                # prevent KeyError
+                embedding_net_variables[
+                    f"type_embed_net{suffix}/idt_{layer_idx + 1}"
+                ] = 0.0
+
+        type_embedding_net = cls(**data)
+        type_embedding_net.type_embedding_net_variables = embedding_net_variables
+        return type_embedding_net
+
+    def serialize(self, suffix: str = "") -> dict:
+        """Serialize the model.
+
+        Parameters
+        ----------
+        suffix : str, optional
+            The suffix of the scope
+
+        Returns
+        -------
+        dict
+            The serialized data
+        """
+        if suffix != "":
+            type_embedding_pattern = (
+                TYPE_EMBEDDING_PATTERN.replace("/(idt)", suffix + "/(idt)")
+                .replace("/(bias)", suffix + "/(bias)")
+                .replace("/(matrix)", suffix + "/(matrix)")
+            )
+        else:
+            type_embedding_pattern = TYPE_EMBEDDING_PATTERN
+        assert self.type_embedding_net_variables is not None
+        embedding_net = EmbeddingNet(
+            in_dim=self.ntypes,
+            neuron=self.neuron,
+            activation_function=self.filter_activation_fn_name,
+            resnet_dt=self.filter_resnet_dt,
+            precision=self.filter_precision.name,
+        )
+        for key, value in self.type_embedding_net_variables.items():
+            m = re.search(type_embedding_pattern, key)
+            m = [mm for mm in m.groups() if mm is not None]
+            layer_idx = int(m[1]) - 1
+            weight_name = m[0]
+            if weight_name == "idt":
+                value = value.ravel()
+            embedding_net[layer_idx][weight_name] = value
+
+        return {
+            "@class": "TypeEmbedNet",
+            "@version": 1,
+            "ntypes": self.ntypes,
+            "neuron": self.neuron,
+            "resnet_dt": self.filter_resnet_dt,
+            "precision": self.filter_precision.name,
+            "activation_function": self.filter_activation_fn_name,
+            "trainable": self.trainable,
+            "padding": self.padding,
+            "embedding": embedding_net.serialize(),
+        }
diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py
new file mode 100644
index 0000000000..2e20142a66
--- /dev/null
+++ b/source/tests/consistent/test_type_embedding.py
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import unittest
+from typing import (
+    Any,
+    Tuple,
+)
+
+import numpy as np
+
+from deepmd.dpmodel.utils.type_embed import TypeEmbedNet as TypeEmbedNetDP
+from deepmd.utils.argcheck import (
+    type_embedding_args,
+)
+
+from .common import (
+    INSTALLED_PT,
+    INSTALLED_TF,
+    CommonTest,
+    parameterized,
+)
+
+if INSTALLED_PT:
+    import torch
+
+    from deepmd.pt.model.network.network import TypeEmbedNetConsistent as TypeEmbedNetPT
+    from deepmd.pt.utils.env import DEVICE as PT_DEVICE
+else:
+    TypeEmbedNetPT = object
+if INSTALLED_TF:
+    from deepmd.tf.utils.type_embed import TypeEmbedNet as TypeEmbedNetTF
+else:
+    TypeEmbedNetTF = object
+
+
+@parameterized(
+    (True, False),  # resnet_dt
+    ("float32", "float64"),  # precision
+    (True, False),  # padding
+)
+class TestTypeEmbedding(CommonTest, unittest.TestCase):
+    """Test the consistency of the type embedding across backends."""
+
+    @property
+    def data(self) -> dict:
+        (
+            resnet_dt,
+            precision,
+            padding,
+        ) = self.param
+        return {
+            "neuron": [2, 4, 4],
+            "resnet_dt": resnet_dt,
+            "precision": precision,
+            "seed": 20240327,
+        }
+
+    tf_class = TypeEmbedNetTF
+    dp_class = TypeEmbedNetDP
+    pt_class = TypeEmbedNetPT
+    args = type_embedding_args()
+
+    @property
+    def addtional_data(self) -> dict:
+        (
+            resnet_dt,
+            precision,
+            padding,
+        ) = self.param
+        # implicit arguments not input by users
+        return {
+            "ntypes": self.ntypes,
+            "padding": padding,
+        }
+
+    def setUp(self):
+        CommonTest.setUp(self)
+
+        self.ntypes = 2
+
+    def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]:
+        return [
+            obj.build(
+                obj.ntypes,
+                suffix=suffix,
+            ),
+        ], {}
+
+    def eval_dp(self, dp_obj: Any) -> Any:
+        return (dp_obj(),)
+
+    def eval_pt(self, pt_obj: Any) -> Any:
+        return [
+            x.detach().cpu().numpy() if torch.is_tensor(x) else x
+            for x in (pt_obj(device=PT_DEVICE),)
+        ]
+
+    def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]:
+        return (ret[0],)
+
+    @property
+    def rtol(self) -> float:
+        """Relative tolerance for comparing the return value."""
+        (
+            resnet_dt,
+            precision,
+            padding,
+        ) = self.param
+        if precision == "float64":
+            return 1e-10
+        elif precision == "float32":
+            return 1e-4
+        elif precision == "bfloat16":
+            return 1e-1
+        else:
+            raise ValueError(f"Unknown precision: {precision}")
+
+    @property
+    def atol(self) -> float:
+        """Absolute tolerance for comparing the return value."""
+        (
+            resnet_dt,
+            precision,
+            padding,
+        ) = self.param
+        if precision == "float64":
+            return 1e-10
+        elif precision == "float32":
+            return 1e-4
+        elif precision == "bfloat16":
+            return 1e-1
+        else:
+            raise ValueError(f"Unknown precision: {precision}")
diff --git a/source/tests/pt/model/models/dpa2_tebd.pth b/source/tests/pt/model/models/dpa2_tebd.pth
index 3d4fc5511c93036a18be1b290fcdecc482215bf6..6a1cc225f1e7b8e57965889504cfaa2e1902337b 100644
GIT binary patch
literal 1588
zcmbtUO=uHQ5PtdJY>l?|$3&~(p*3PnHrX^y1xbxH9u{ocqK2Xko6XWJZj!$2rnE|`
zmlj$OyjZbygHM+bO5TX
z!6V2u9}Dx5h!7vK6~97pUbczs1fP^_lr6@}NnsqSIXRK!qP9><7DR~*CHZhl5F;U(
zk4uSUXv|4Mjnx`7AeR!dWJh?Y%|poEL@F%ugZWbu>Vhg{L_vK}gWlX|3K|%*42gaQ
zFlvUFhDL^9RMU{0iQ+u#WVmYpnu2ZrlWbUECAd3^GKp%V`9Kc1Cu(2_8P?+f#1w2`
zR8w$o-g%=8n+9NWDh*qLMid|v<7F1#EZmn1Zng%|DS(zV+@Gs$3|$JkPk4Y)NjXIE
z)?jNI9*pAiloYlF7z5EZYwVgtwPFp_WU~$!5u4!&nNMJf{61ZtPEn9$k;i_fPJXrM
zQ5INh`yqg~&=q#d?*m}wk|ooS{^?zQ{qnu3e^EKtd@TE6;g@&y)Pp9^>32%U;r`{{
z4>HQe@bm2Tzz=0MadED9Wy!nR{>fWEyrQh9O`DIOt|*hSM|W2*Y*1gxb

delta 555
zcmdnOvzKFnl$;m?4?}K#N@`Agd1_{QMoF=LN@7W(UO{$_n-d4aL<>#P2muBLhSc1o
)RdIWymUQ?vWb1^^~~NJZH1iP9Nvs=ghX1vgk8KMNM{KdL=8$V2h>>YO|Y@
C0OBP1s1N!2be{i7@0sW
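
The consistency contract introduced by this patch is easiest to see as a round trip: the dpmodel (NumPy) implementation serializes its parameters, the PyTorch implementation deserializes them, and both then embed the one-hot type encoding into the same matrix. The sketch below is an illustrative usage example, not part of the patch; it assumes deepmd-kit is installed with the PyTorch backend available, and the two-type setup, neuron sizes, and float64 tolerance are example values mirroring source/tests/consistent/test_type_embedding.py above.

import numpy as np
import torch

from deepmd.dpmodel.utils.type_embed import TypeEmbedNet as TypeEmbedNetDP
from deepmd.pt.model.network.network import TypeEmbedNetConsistent

# Build the reference NumPy implementation; padding=True appends a zero row
# that serves as the default embedding of the empty type.
dp_net = TypeEmbedNetDP(ntypes=2, neuron=[2, 4, 4], padding=True)

# Round-trip the parameters into the PyTorch backend.
pt_net = TypeEmbedNetConsistent.deserialize(dp_net.serialize())

# Both backends embed an ntypes x ntypes identity matrix (one-hot types);
# the result has shape (ntypes + 1, neuron[-1]) because of the padding row.
dp_embed = dp_net.call()
pt_embed = pt_net(torch.device("cpu")).detach().cpu().numpy()

np.testing.assert_allclose(dp_embed, pt_embed, rtol=1e-10, atol=1e-10)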