diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml
index 036fc076fa..5f10a85269 100644
--- a/.github/workflows/build_wheel.yml
+++ b/.github/workflows/build_wheel.yml
@@ -47,7 +47,7 @@ jobs:
name: Setup QEMU
if: matrix.platform_id == 'manylinux_aarch64'
- name: Build wheels
- uses: pypa/cibuildwheel@v2.14
+ uses: pypa/cibuildwheel@v2.15
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_ARCHS: all
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8aeece125d..169ac19885 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,7 +9,7 @@ repos:
- id: end-of-file-fixer
exclude: "^.+\\.pbtxt$"
- id: check-yaml
- #- id: check-json
+ - id: check-json
- id: check-added-large-files
args: ['--maxkb=1024', '--enforce-all']
# TODO: remove the following after resolved
@@ -33,7 +33,7 @@ repos:
files: \.py$
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: v0.0.280
+ rev: v0.0.286
hooks:
- id: ruff
args: ["--fix"]
@@ -45,7 +45,7 @@ repos:
args: ["--write"]
# Python inside docs
- repo: https://github.com/asottile/blacken-docs
- rev: 1.15.0
+ rev: 1.16.0
hooks:
- id: blacken-docs
# C++
@@ -72,7 +72,7 @@ repos:
#- id: cmake-lint
# license header
- repo: https://github.com/Lucas-C/pre-commit-hooks
- rev: v1.5.1
+ rev: v1.5.4
hooks:
# C++, js
- id: insert-license
diff --git a/README.md b/README.md
index 0d212455fd..76f5c9d3bb 100644
--- a/README.md
+++ b/README.md
@@ -102,6 +102,7 @@ A full [document](doc/train/train-input-auto.rst) on options in the training inp
- [Descriptor `"se_e2_r"`](doc/model/train-se-e2-r.md)
- [Descriptor `"se_e3"`](doc/model/train-se-e3.md)
- [Descriptor `"se_atten"`](doc/model/train-se-atten.md)
+ - [Descriptor `"se_atten_v2"`](doc/model/train-se-atten.md#descriptor-se_atten_v2)
- [Descriptor `"hybrid"`](doc/model/train-hybrid.md)
- [Descriptor `sel`](doc/model/sel.md)
- [Fit energy](doc/model/train-energy.md)
diff --git a/deepmd/descriptor/__init__.py b/deepmd/descriptor/__init__.py
index 4d57ac1b6b..ab726d95c8 100644
--- a/deepmd/descriptor/__init__.py
+++ b/deepmd/descriptor/__init__.py
@@ -24,6 +24,9 @@
from .se_atten import (
DescrptSeAtten,
)
+from .se_atten_v2 import (
+ DescrptSeAttenV2,
+)
from .se_r import (
DescrptSeR,
)
@@ -41,6 +44,7 @@
"DescrptSeAEfLower",
"DescrptSeAMask",
"DescrptSeAtten",
+ "DescrptSeAttenV2",
"DescrptSeR",
"DescrptSeT",
]
diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py
index 64acd769dd..12558c45c4 100644
--- a/deepmd/descriptor/se_atten.py
+++ b/deepmd/descriptor/se_atten.py
@@ -108,6 +108,13 @@ class DescrptSeAtten(DescrptSeA):
Whether to mask the diagonal in the attention weights.
multi_task
If the model has multi fitting nets to train.
+ stripped_type_embedding
+ Whether to strip the type embedding into a separate embedding network.
+ The default value is True in the `se_atten_v2` descriptor.
+ smooth_type_embdding
+ When using stripped type embedding, whether to multiply the network output of the type embedding
+ by a smooth factor to keep the model smooth, instead of setting `set_davg_zero` to be True.
+ The default value is True in the `se_atten_v2` descriptor.
"""
def __init__(
@@ -133,9 +140,10 @@ def __init__(
attn_mask: bool = False,
multi_task: bool = False,
stripped_type_embedding: bool = False,
+ smooth_type_embdding: bool = False,
**kwargs,
) -> None:
- if not set_davg_zero:
+ if not set_davg_zero and not (stripped_type_embedding and smooth_type_embdding):
warnings.warn(
"Set 'set_davg_zero' False in descriptor 'se_atten' "
"may cause unexpected incontinuity during model inference!"
@@ -166,6 +174,7 @@ def __init__(
"2"
), "se_atten only support tensorflow version 2.0 or higher."
self.stripped_type_embedding = stripped_type_embedding
+ self.smooth = smooth_type_embdding
self.ntypes = ntypes
self.att_n = attn
self.attn_layer = attn_layer
@@ -607,6 +616,7 @@ def build(
sel_a=self.sel_all_a,
sel_r=self.sel_all_r,
)
+
self.nei_type_vec = tf.reshape(self.nei_type_vec, [-1])
self.nmask = tf.cast(
tf.reshape(self.nmask, [-1, 1, self.sel_all_a[0]]),
@@ -625,6 +635,41 @@ def build(
tf.slice(atype, [0, 0], [-1, natoms[0]]), [-1]
) ## lammps will have error without this
self._identity_tensors(suffix=suffix)
+ if self.smooth:
+ self.sliced_avg = tf.reshape(
+ tf.slice(
+ tf.reshape(self.t_avg, [self.ntypes, -1, 4]), [0, 0, 0], [-1, 1, 1]
+ ),
+ [self.ntypes, 1],
+ )
+ self.sliced_std = tf.reshape(
+ tf.slice(
+ tf.reshape(self.t_std, [self.ntypes, -1, 4]), [0, 0, 0], [-1, 1, 1]
+ ),
+ [self.ntypes, 1],
+ )
+ self.avg_looked_up = tf.reshape(
+ tf.nn.embedding_lookup(self.sliced_avg, self.atype_nloc),
+ [-1, natoms[0], 1],
+ )
+ self.std_looked_up = tf.reshape(
+ tf.nn.embedding_lookup(self.sliced_std, self.atype_nloc),
+ [-1, natoms[0], 1],
+ )
+ self.recovered_r = (
+ tf.reshape(
+ tf.slice(tf.reshape(self.descrpt, [-1, 4]), [0, 0], [-1, 1]),
+ [-1, natoms[0], self.sel_all_a[0]],
+ )
+ * self.std_looked_up
+ + self.avg_looked_up
+ )
+ uu = 1 - self.rcut_r_smth * self.recovered_r
+ self.recovered_switch = -uu * uu * uu + 1
+ self.recovered_switch = tf.clip_by_value(self.recovered_switch, 0.0, 1.0)
+ self.recovered_switch = tf.cast(
+ self.recovered_switch, self.filter_precision
+ )
self.dout, self.qmat = self._pass_filter(
self.descrpt_reshape,
@@ -1146,9 +1191,10 @@ def _filter_lower(
two_embd = tf.nn.embedding_lookup(
embedding_of_two_side_type_embedding, index_of_two_side
)
-
+ if self.smooth:
+ two_embd = two_embd * tf.reshape(self.recovered_switch, [-1, 1])
if not self.compress:
- xyz_scatter = xyz_scatter * two_embd + two_embd
+ xyz_scatter = xyz_scatter * two_embd + xyz_scatter
else:
return op_module.tabulate_fusion_se_atten(
tf.cast(self.table.data[net], self.filter_precision),
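
For reference, a minimal NumPy sketch (not part of the patch) of the smooth switch introduced above: the first column of the environment matrix is de-normalized with the per-type mean and standard deviation, and the result is mapped to a factor in [0, 1] that multiplies the two-side type-embedding output.

```py
import numpy as np


def recovered_switch(recovered_r: np.ndarray, rcut_smth: float) -> np.ndarray:
    """Sketch of the TF code above: `recovered_r` stands for the de-normalized
    first column of the environment matrix (descrpt[..., 0] * std + avg)."""
    uu = 1.0 - rcut_smth * recovered_r
    return np.clip(-uu * uu * uu + 1.0, 0.0, 1.0)
```

Since the de-normalized value decays to zero towards the cutoff, the factor also goes to zero there, and the new `xyz_scatter * two_embd + xyz_scatter` term falls back smoothly to plain `xyz_scatter`.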
diff --git a/deepmd/descriptor/se_atten_v2.py b/deepmd/descriptor/se_atten_v2.py
new file mode 100644
index 0000000000..0e1a70262f
--- /dev/null
+++ b/deepmd/descriptor/se_atten_v2.py
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
+from typing import (
+ List,
+ Optional,
+)
+
+from .descriptor import (
+ Descriptor,
+)
+from .se_atten import (
+ DescrptSeAtten,
+)
+
+log = logging.getLogger(__name__)
+
+
+@Descriptor.register("se_atten_v2")
+class DescrptSeAttenV2(DescrptSeAtten):
+ r"""Smooth version 2.0 descriptor with attention.
+
+ Parameters
+ ----------
+ rcut
+ The cut-off radius :math:`r_c`
+ rcut_smth
+ From where the environment matrix should be smoothed :math:`r_s`
+ sel : list[str]
+ sel[i] specifies the maximum number of type i atoms in the cut-off radius
+ neuron : list[int]
+ Number of neurons in each hidden layer of the embedding net :math:`\mathcal{N}`
+ axis_neuron
+ Number of the axis neuron :math:`M_2` (number of columns of the sub-matrix of the embedding matrix)
+ resnet_dt
+ Time-step `dt` in the resnet construction:
+ y = x + dt * \phi (Wx + b)
+ trainable
+ If the weights of embedding net are trainable.
+ seed
+ Random seed for initializing the network parameters.
+ type_one_side
+ If True, build N_types embedding nets; otherwise, build N_types^2 embedding nets
+ exclude_types : List[List[int]]
+ The excluded pairs of types which have no interaction with each other.
+ For example, `[[0, 1]]` means no interaction between type 0 and type 1.
+ set_davg_zero
+ Set the shift of embedding net input to zero.
+ activation_function
+ The activation function in the embedding net. Supported options are |ACTIVATION_FN|
+ precision
+ The precision of the embedding net parameters. Supported options are |PRECISION|
+ uniform_seed
+ Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
+ attn
+ The length of hidden vector during scale-dot attention computation.
+ attn_layer
+ The number of layers in attention mechanism.
+ attn_dotr
+ Whether to dot the relative coordinates on the attention weights as a gated scheme.
+ attn_mask
+ Whether to mask the diagonal in the attention weights.
+ multi_task
+ If the model has multi fitting nets to train.
+ """
+
+ def __init__(
+ self,
+ rcut: float,
+ rcut_smth: float,
+ sel: int,
+ ntypes: int,
+ neuron: List[int] = [24, 48, 96],
+ axis_neuron: int = 8,
+ resnet_dt: bool = False,
+ trainable: bool = True,
+ seed: Optional[int] = None,
+ type_one_side: bool = True,
+ set_davg_zero: bool = False,
+ exclude_types: List[List[int]] = [],
+ activation_function: str = "tanh",
+ precision: str = "default",
+ uniform_seed: bool = False,
+ attn: int = 128,
+ attn_layer: int = 2,
+ attn_dotr: bool = True,
+ attn_mask: bool = False,
+ multi_task: bool = False,
+ **kwargs,
+ ) -> None:
+ DescrptSeAtten.__init__(
+ self,
+ rcut,
+ rcut_smth,
+ sel,
+ ntypes,
+ neuron=neuron,
+ axis_neuron=axis_neuron,
+ resnet_dt=resnet_dt,
+ trainable=trainable,
+ seed=seed,
+ type_one_side=type_one_side,
+ set_davg_zero=set_davg_zero,
+ exclude_types=exclude_types,
+ activation_function=activation_function,
+ precision=precision,
+ uniform_seed=uniform_seed,
+ attn=attn,
+ attn_layer=attn_layer,
+ attn_dotr=attn_dotr,
+ attn_mask=attn_mask,
+ multi_task=multi_task,
+ stripped_type_embedding=True,
+ smooth_type_embdding=True,
+ **kwargs,
+ )
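
A usage sketch (illustrative values, not part of the patch): per the constructor above, selecting `"se_atten_v2"` in a training input behaves like an explicit `"se_atten"` configuration with the stripped and smoothed type embedding switched on.

```py
descriptor_v2 = {
    "type": "se_atten_v2",
    "sel": 120,
    "rcut_smth": 0.50,
    "rcut": 6.00,
    "neuron": [25, 50, 100],
    "axis_neuron": 16,
    "attn": 128,
    "attn_layer": 2,
}

# What DescrptSeAttenV2 forwards to DescrptSeAtten (set_davg_zero defaults to False):
descriptor_v1_equivalent = {
    **{k: v for k, v in descriptor_v2.items() if k != "type"},
    "type": "se_atten",
    "stripped_type_embedding": True,
    "smooth_type_embdding": True,
    "set_davg_zero": False,
}
```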
diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py
index d285c996e8..e348318f41 100644
--- a/deepmd/entrypoints/test.py
+++ b/deepmd/entrypoints/test.py
@@ -71,7 +71,7 @@ def test(
set_prefix : str
string prefix of set
numb_test : int
- munber of tests to do
+ number of tests to do. 0 means all data.
rand_seed : Optional[int]
seed for random generator
shuffle_test : bool
@@ -88,6 +88,9 @@ def test(
RuntimeError
if no valid system was found
"""
+ if numb_test == 0:
+ # only float has inf, but should work for min
+ numb_test = float("inf")
if datafile is not None:
datalist = open(datafile)
all_sys = datalist.read().splitlines()
@@ -934,18 +937,40 @@ def test_dipole(
if detail_file is not None:
detail_path = Path(detail_file)
+ if not atomic:
+ pe = np.concatenate(
+ (
+ np.reshape(test_data["dipole"][:numb_test], [-1, 3]),
+ np.reshape(dipole, [-1, 3]),
+ ),
+ axis=1,
+ )
+ header_text = "data_x data_y data_z pred_x pred_y pred_z"
+ else:
+ pe = np.concatenate(
+ (
+ np.reshape(
+ test_data["atomic_dipole"][:numb_test], [-1, 3 * sel_natoms]
+ ),
+ np.reshape(dipole, [-1, 3 * sel_natoms]),
+ ),
+ axis=1,
+ )
+ header_text = [
+ f"{letter}{number}"
+ for number in range(1, sel_natoms + 1)
+ for letter in ["data_x", "data_y", "data_z"]
+ ] + [
+ f"{letter}{number}"
+ for number in range(1, sel_natoms + 1)
+ for letter in ["pred_x", "pred_y", "pred_z"]
+ ]
+ header_text = " ".join(header_text)
- pe = np.concatenate(
- (
- np.reshape(test_data["dipole"][:numb_test], [-1, 3]),
- np.reshape(dipole, [-1, 3]),
- ),
- axis=1,
- )
np.savetxt(
detail_path.with_suffix(".out"),
pe,
- header="data_x data_y data_z pred_x pred_y pred_z",
+ header=header_text,
)
return {"rmse": (rmse_f, dipole.size)}
diff --git a/deepmd/entrypoints/train.py b/deepmd/entrypoints/train.py
index 57a306dece..fa3a82bbdf 100755
--- a/deepmd/entrypoints/train.py
+++ b/deepmd/entrypoints/train.py
@@ -445,7 +445,7 @@ def get_min_nbor_dist(jdata, rcut):
def parse_auto_sel(sel):
- if type(sel) is not str:
+ if not isinstance(sel, str):
return False
words = sel.split(":")
if words[0] == "auto":
@@ -476,7 +476,15 @@ def update_one_sel(jdata, descriptor):
if descriptor["type"] == "loc_frame":
return descriptor
rcut = descriptor["rcut"]
- tmp_sel = get_sel(jdata, rcut, one_type=descriptor["type"] in ("se_atten",))
+ tmp_sel = get_sel(
+ jdata,
+ rcut,
+ one_type=descriptor["type"]
+ in (
+ "se_atten",
+ "se_atten_v2",
+ ),
+ )
sel = descriptor["sel"]
if isinstance(sel, int):
# convert to list and finnally convert back to int
@@ -495,7 +503,10 @@ def update_one_sel(jdata, descriptor):
"not less than %d, but you set it to %d. The accuracy"
" of your model may get worse." % (ii, tt, dd)
)
- if descriptor["type"] in ("se_atten",):
+ if descriptor["type"] in (
+ "se_atten",
+ "se_atten_v2",
+ ):
descriptor["sel"] = sel = sum(sel)
return descriptor
diff --git a/deepmd/env.py b/deepmd/env.py
index 77f36f60b3..615e89f3ac 100644
--- a/deepmd/env.py
+++ b/deepmd/env.py
@@ -48,7 +48,7 @@ def dlopen_library(module: str, filename: str):
except ModuleNotFoundError:
pass
else:
- libs = sorted(Path(m.__file__).parent.glob(filename))
+ libs = sorted(Path(m.__path__[0]).glob(filename))
# hope that there is only one version installed...
if len(libs):
ctypes.CDLL(str(libs[0].absolute()))
@@ -418,9 +418,9 @@ def get_module(module_name: str) -> "ModuleType":
) from e
error_message = (
"This deepmd-kit package is inconsitent with TensorFlow "
- "Runtime, thus an error is raised when loading {}. "
+ f"Runtime, thus an error is raised when loading {module_name}. "
"You need to rebuild deepmd-kit against this TensorFlow "
- "runtime.".format(module_name)
+ "runtime."
)
if TF_CXX11_ABI_FLAG == 1:
# #1791
diff --git a/deepmd/fit/polar.py b/deepmd/fit/polar.py
index 11e80c6641..0a6f7d4242 100644
--- a/deepmd/fit/polar.py
+++ b/deepmd/fit/polar.py
@@ -107,7 +107,7 @@ def __init__(
self.scale = [1.0 for ii in range(self.ntypes)]
# if self.diag_shift is None:
# self.diag_shift = [0.0 for ii in range(self.ntypes)]
- if type(self.sel_type) is not list:
+ if not isinstance(self.sel_type, list):
self.sel_type = [self.sel_type]
self.sel_type = sorted(self.sel_type)
self.constant_matrix = np.zeros(
@@ -115,7 +115,7 @@ def __init__(
) # self.ntypes x 1, store the average diagonal value
# if type(self.diag_shift) is not list:
# self.diag_shift = [self.diag_shift]
- if type(self.scale) is not list:
+ if not isinstance(self.scale, list):
self.scale = [self.scale for ii in range(self.ntypes)]
self.scale = np.array(self.scale)
self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
diff --git a/deepmd/infer/deep_pot.py b/deepmd/infer/deep_pot.py
index 122dfd7442..b3e9be1e67 100644
--- a/deepmd/infer/deep_pot.py
+++ b/deepmd/infer/deep_pot.py
@@ -114,15 +114,16 @@ def __init__(
operations = [op.name for op in self.graph.get_operations()]
# check if the graph has these operations:
# if yes add them
- if "t_efield" in operations:
- self._get_tensor("t_efield:0", "t_efield")
+
+ if ("%s/t_efield" % load_prefix) in operations:
+ self.tensors.update({"t_efield": "t_efield:0"})
self.has_efield = True
else:
log.debug("Could not get tensor 't_efield:0'")
self.t_efield = None
self.has_efield = False
- if "load/t_fparam" in operations:
+ if ("%s/t_fparam" % load_prefix) in operations:
self.tensors.update({"t_fparam": "t_fparam:0"})
self.has_fparam = True
else:
@@ -130,7 +131,7 @@ def __init__(
self.t_fparam = None
self.has_fparam = False
- if "load/t_aparam" in operations:
+ if ("%s/t_aparam" % load_prefix) in operations:
self.tensors.update({"t_aparam": "t_aparam:0"})
self.has_aparam = True
else:
@@ -138,7 +139,7 @@ def __init__(
self.t_aparam = None
self.has_aparam = False
- if "load/spin_attr/ntypes_spin" in operations:
+ if ("%s/spin_attr/ntypes_spin" % load_prefix) in operations:
self.tensors.update({"t_ntypes_spin": "spin_attr/ntypes_spin:0"})
self.has_spin = True
else:
@@ -399,7 +400,6 @@ def _prepare_feed_dict(
atom_types,
fparam=None,
aparam=None,
- atomic=False,
efield=None,
mixed_type=False,
):
diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py
index 0274384188..e9950f9d5e 100644
--- a/deepmd/infer/model_devi.py
+++ b/deepmd/infer/model_devi.py
@@ -21,13 +21,18 @@
)
-def calc_model_devi_f(fs: np.ndarray) -> Tuple[np.ndarray]:
+def calc_model_devi_f(
+ fs: np.ndarray, real_f: Optional[np.ndarray] = None
+) -> Tuple[np.ndarray]:
"""Calculate model deviation of force.
Parameters
----------
fs : numpy.ndarray
size of `n_models x n_frames x n_atoms x 3`
+ real_f : numpy.ndarray or None
+ real force, size of `n_frames x n_atoms x 3`. If given,
+ the RMS real error is calculated instead.
Returns
-------
@@ -38,14 +43,21 @@ def calc_model_devi_f(fs: np.ndarray) -> Tuple[np.ndarray]:
avg_devi_f : numpy.ndarray
average deviation of force in all atoms
"""
- fs_devi = np.linalg.norm(np.std(fs, axis=0), axis=-1)
+ if real_f is None:
+ fs_devi = np.linalg.norm(np.std(fs, axis=0), axis=-1)
+ else:
+ fs_devi = np.linalg.norm(
+ np.sqrt(np.mean(np.square(fs - real_f), axis=0)), axis=-1
+ )
max_devi_f = np.max(fs_devi, axis=-1)
min_devi_f = np.min(fs_devi, axis=-1)
avg_devi_f = np.mean(fs_devi, axis=-1)
return max_devi_f, min_devi_f, avg_devi_f
-def calc_model_devi_e(es: np.ndarray) -> np.ndarray:
+def calc_model_devi_e(
+ es: np.ndarray, real_e: Optional[np.ndarray] = None
+) -> np.ndarray:
"""Calculate model deviation of total energy per atom.
Here we don't use the atomic energy, as the decomposition
@@ -56,24 +68,35 @@ def calc_model_devi_e(es: np.ndarray) -> np.ndarray:
----------
es : numpy.ndarray
size of `n_models x n_frames x 1
+ real_e : numpy.ndarray or None
+ real energy, size of `n_frames x 1`. If given,
+ the RMS real error is calculated instead.
Returns
-------
max_devi_e : numpy.ndarray
maximum deviation of energy
"""
- es_devi = np.std(es, axis=0)
+ if real_e is None:
+ es_devi = np.std(es, axis=0)
+ else:
+ es_devi = np.sqrt(np.mean(np.square(es - real_e), axis=0))
es_devi = np.squeeze(es_devi, axis=-1)
return es_devi
-def calc_model_devi_v(vs: np.ndarray) -> Tuple[np.ndarray]:
+def calc_model_devi_v(
+ vs: np.ndarray, real_v: Optional[np.ndarray] = None
+) -> Tuple[np.ndarray]:
"""Calculate model deviation of virial.
Parameters
----------
vs : numpy.ndarray
size of `n_models x n_frames x 9`
+ real_v : numpy.ndarray or None
+ real virial, size of `n_frames x 9`. If given,
+ the RMS real error is calculated instead.
Returns
-------
@@ -84,7 +107,10 @@ def calc_model_devi_v(vs: np.ndarray) -> Tuple[np.ndarray]:
avg_devi_v : numpy.ndarray
average deviation of virial in 9 elements
"""
- vs_devi = np.std(vs, axis=0)
+ if real_v is None:
+ vs_devi = np.std(vs, axis=0)
+ else:
+ vs_devi = np.sqrt(np.mean(np.square(vs - real_v), axis=0))
max_devi_v = np.max(vs_devi, axis=-1)
min_devi_v = np.min(vs_devi, axis=-1)
avg_devi_v = np.linalg.norm(vs_devi, axis=-1) / 3
@@ -148,6 +174,7 @@ def calc_model_devi(
mixed_type=False,
fparam: Optional[np.ndarray] = None,
aparam: Optional[np.ndarray] = None,
+ real_data: Optional[dict] = None,
):
"""Python interface to calculate model deviation.
@@ -171,6 +198,8 @@ def calc_model_devi(
frame specific parameters
aparam : numpy.ndarray
atomic specific parameters
+ real_data : dict, optional
+ real data to calculate RMS real error
Returns
-------
@@ -211,9 +240,14 @@ def calc_model_devi(
virials = np.array(virials)
devi = [np.arange(coord.shape[0]) * frequency]
- devi += list(calc_model_devi_v(virials))
- devi += list(calc_model_devi_f(forces))
- devi.append(calc_model_devi_e(energies))
+ if real_data is None:
+ devi += list(calc_model_devi_v(virials))
+ devi += list(calc_model_devi_f(forces))
+ devi.append(calc_model_devi_e(energies))
+ else:
+ devi += list(calc_model_devi_v(virials, real_data["virial"]))
+ devi += list(calc_model_devi_f(forces, real_data["force"]))
+ devi.append(calc_model_devi_e(energies, real_data["energy"]))
devi = np.vstack(devi).T
if fname:
write_model_devi_out(devi, fname)
@@ -221,7 +255,14 @@ def calc_model_devi(
def make_model_devi(
- *, models: list, system: str, set_prefix: str, output: str, frequency: int, **kwargs
+ *,
+ models: list,
+ system: str,
+ set_prefix: str,
+ output: str,
+ frequency: int,
+ real_error: bool = False,
+ **kwargs,
):
"""Make model deviation calculation.
@@ -239,6 +280,8 @@ def make_model_devi(
The number of steps that elapse between writing coordinates
in a trajectory by a MD engine (such as Gromacs / Lammps).
This paramter is used to determine the index in the output file.
+ real_error : bool, default: False
+ If True, calculate the RMS real error instead of model deviation.
**kwargs
Arbitrary keyword arguments.
"""
@@ -279,6 +322,29 @@ def make_model_devi(
must=True,
high_prec=False,
)
+ if real_error:
+ dp_data.add(
+ "energy",
+ 1,
+ atomic=False,
+ must=False,
+ high_prec=True,
+ )
+ dp_data.add(
+ "force",
+ 3,
+ atomic=True,
+ must=False,
+ high_prec=False,
+ )
+ dp_data.add(
+ "virial",
+ 9,
+ atomic=False,
+ must=False,
+ high_prec=False,
+ )
+
mixed_type = dp_data.mixed_type
data_sets = [dp_data._load_set(set_name) for set_name in dp_data.dirs]
@@ -301,6 +367,15 @@ def make_model_devi(
aparam = data["aparam"]
else:
aparam = None
+ if real_error:
+ natoms = atype.shape[-1]
+ real_data = {
+ "energy": data["energy"] / natoms,
+ "force": data["force"].reshape([-1, natoms, 3]),
+ "virial": data["virial"] / natoms,
+ }
+ else:
+ real_data = None
devi = calc_model_devi(
coord,
box,
@@ -309,6 +384,7 @@ def make_model_devi(
mixed_type=mixed_type,
fparam=fparam,
aparam=aparam,
+ real_data=real_data,
)
nframes_tot += coord.shape[0]
devis.append(devi)
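
For reference, a standalone NumPy sketch (array shapes follow the docstrings above; the data are random placeholders) contrasting the two quantities `calc_model_devi_f` can now report: the ensemble standard deviation across models, and the RMS error against reference forces when `real_f` is given.

```py
import numpy as np

rng = np.random.default_rng(0)
n_models, n_frames, n_atoms = 4, 2, 5
fs = rng.normal(size=(n_models, n_frames, n_atoms, 3))  # model-predicted forces
real_f = rng.normal(size=(n_frames, n_atoms, 3))        # reference ("real") forces

# model deviation: atom-wise norm of the std over the model ensemble
fs_devi = np.linalg.norm(np.std(fs, axis=0), axis=-1)  # (n_frames, n_atoms)

# RMS real error: atom-wise norm of the RMS difference to the reference
fs_err = np.linalg.norm(np.sqrt(np.mean((fs - real_f) ** 2, axis=0)), axis=-1)

max_devi_f, max_err_f = fs_devi.max(axis=-1), fs_err.max(axis=-1)  # per-frame maxima
```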
diff --git a/deepmd/nvnmd/utils/config.py b/deepmd/nvnmd/utils/config.py
index e765b59786..96ca74c4c9 100644
--- a/deepmd/nvnmd/utils/config.py
+++ b/deepmd/nvnmd/utils/config.py
@@ -156,7 +156,7 @@ def init_dscp(self, jdata: dict, jdata_parent: dict = {}) -> dict:
jdata["SEL"] = (jdata["sel"] + [0, 0, 0, 0])[0:4]
jdata["NNODE_FEAS"] = [1] + jdata["neuron"]
jdata["nlayer_fea"] = len(jdata["neuron"])
- jdata["same_net"] = int(1) if jdata["type_one_side"] else int(0)
+ jdata["same_net"] = 1 if jdata["type_one_side"] else 0
# neighbor
jdata["NIDP"] = int(np.sum(jdata["sel"]))
jdata["NIX"] = 2 ** int(np.ceil(np.log2(jdata["NIDP"] / 1.5)))
@@ -170,7 +170,7 @@ def init_dscp(self, jdata: dict, jdata_parent: dict = {}) -> dict:
jdata["SEL"] = jdata["sel"]
jdata["NNODE_FEAS"] = [1] + jdata["neuron"]
jdata["nlayer_fea"] = len(jdata["neuron"])
- jdata["same_net"] = int(1) if jdata["type_one_side"] else int(0)
+ jdata["same_net"] = 1 if jdata["type_one_side"] else 0
# neighbor
jdata["NIDP"] = int(jdata["sel"])
jdata["NIX"] = 2 ** int(np.ceil(np.log2(jdata["NIDP"] / 1.5)))
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index e37e03f9e0..b67722bd89 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -333,9 +333,7 @@ def descrpt_hybrid_args():
]
-@descrpt_args_plugin.register("se_atten")
-def descrpt_se_atten_args():
- doc_stripped_type_embedding = "Whether to strip the type embedding into a separated embedding network. Setting it to `False` will fall back to the previous version of `se_atten` which is non-compressible."
+def descrpt_se_atten_common_args():
doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\
- `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\
- `List[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\
@@ -350,7 +348,6 @@ def descrpt_se_atten_args():
doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
doc_trainable = "If the parameters in the embedding net is trainable"
doc_seed = "Random seed for parameter initialization"
- doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"
doc_exclude_types = "The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1."
doc_attn = "The length of hidden vectors in attention layers"
doc_attn_layer = "The number of attention layers. Note that model compression of `se_atten` is only enabled when attn_layer==0 and stripped_type_embedding is True"
@@ -358,13 +355,6 @@ def descrpt_se_atten_args():
doc_attn_mask = "Whether to do mask on the diagonal in the attention matrix"
return [
- Argument(
- "stripped_type_embedding",
- bool,
- optional=True,
- default=False,
- doc=doc_stripped_type_embedding,
- ),
Argument("sel", [int, list, str], optional=True, default="auto", doc=doc_sel),
Argument("rcut", float, optional=True, default=6.0, doc=doc_rcut),
Argument("rcut_smth", float, optional=True, default=0.5, doc=doc_rcut_smth),
@@ -394,9 +384,6 @@ def descrpt_se_atten_args():
Argument(
"exclude_types", list, optional=True, default=[], doc=doc_exclude_types
),
- Argument(
- "set_davg_zero", bool, optional=True, default=True, doc=doc_set_davg_zero
- ),
Argument("attn", int, optional=True, default=128, doc=doc_attn),
Argument("attn_layer", int, optional=True, default=2, doc=doc_attn_layer),
Argument("attn_dotr", bool, optional=True, default=True, doc=doc_attn_dotr),
@@ -404,6 +391,44 @@ def descrpt_se_atten_args():
]
+@descrpt_args_plugin.register("se_atten")
+def descrpt_se_atten_args():
+ doc_stripped_type_embedding = "Whether to strip the type embedding into a separate embedding network. Setting it to `False` will fall back to the previous version of `se_atten`, which is non-compressible."
+ doc_smooth_type_embdding = "When using stripped type embedding, whether to multiply the network output of the type embedding by a smooth factor to keep the model smooth, instead of setting `set_davg_zero` to be True."
+ doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"
+
+ return descrpt_se_atten_common_args() + [
+ Argument(
+ "stripped_type_embedding",
+ bool,
+ optional=True,
+ default=False,
+ doc=doc_stripped_type_embedding,
+ ),
+ Argument(
+ "smooth_type_embdding",
+ bool,
+ optional=True,
+ default=False,
+ doc=doc_smooth_type_embdding,
+ ),
+ Argument(
+ "set_davg_zero", bool, optional=True, default=True, doc=doc_set_davg_zero
+ ),
+ ]
+
+
+@descrpt_args_plugin.register("se_atten_v2")
+def descrpt_se_atten_v2_args():
+ doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"
+
+ return descrpt_se_atten_common_args() + [
+ Argument(
+ "set_davg_zero", bool, optional=True, default=False, doc=doc_set_davg_zero
+ ),
+ ]
+
+
@descrpt_args_plugin.register("se_a_mask")
def descrpt_se_a_mask_args():
doc_sel = 'This parameter sets the number of selected neighbors for each type of atom. It can be:\n\n\
@@ -459,6 +484,7 @@ def descrpt_variant_type_args(exclude_hybrid: bool = False) -> Variant:
link_se_a_tpe = make_link("se_a_tpe", "model/descriptor[se_a_tpe]")
link_hybrid = make_link("hybrid", "model/descriptor[hybrid]")
link_se_atten = make_link("se_atten", "model/descriptor[se_atten]")
+ link_se_atten_v2 = make_link("se_atten_v2", "model/descriptor[se_atten_v2]")
doc_descrpt_type = "The type of the descritpor. See explanation below. \n\n\
- `loc_frame`: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.\n\n\
- `se_e2_a`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.\n\n\
@@ -466,6 +492,7 @@ def descrpt_variant_type_args(exclude_hybrid: bool = False) -> Variant:
- `se_e3`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.\n\n\
- `se_a_tpe`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.\n\n\
- `se_atten`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.\n\n\
+- `se_atten_v2`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with a stripped and smoothed type embedding will be used by this descriptor.\n\n\
- `se_a_mask`: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). *aparam* are required as an indicator matrix for the real/virtual sign of input atoms. \n\n\
- `hybrid`: Concatenate of a list of descriptors as a new descriptor."
@@ -1861,8 +1888,8 @@ def normalize_fitting_weight(fitting_keys, data_keys, fitting_weight=None):
else:
valid_fitting_keys.remove(item)
log.warning(
- "Fitting net '{}' has zero or invalid weight "
- "and will not be used in training.".format(item)
+ f"Fitting net '{item}' has zero or invalid weight "
+ "and will not be used in training."
)
new_weight[item] = 0.0
else:
diff --git a/deepmd/utils/finetune.py b/deepmd/utils/finetune.py
index aaa4575c86..b641a6beca 100644
--- a/deepmd/utils/finetune.py
+++ b/deepmd/utils/finetune.py
@@ -42,10 +42,11 @@ def replace_model_params_with_pretrained_model(
# Check the model type
assert pretrained_jdata["model"]["descriptor"]["type"] in [
- "se_atten"
+ "se_atten",
+ "se_atten_v2",
] and pretrained_jdata["model"]["fitting_net"]["type"] in [
"ener"
- ], "The finetune process only supports models pretrained with 'se_atten' descriptor and 'ener' fitting_net!"
+ ], "The finetune process only supports models pretrained with 'se_atten' or 'se_atten_v2' descriptor and 'ener' fitting_net!"
# Check the type map
pretrained_type_map = pretrained_jdata["model"]["type_map"]
@@ -55,8 +56,8 @@ def replace_model_params_with_pretrained_model(
if i not in pretrained_type_map:
out_line_type.append(i)
assert not out_line_type, (
- "{} type(s) not contained in the pretrained model! "
- "Please choose another suitable one.".format(str(out_line_type))
+ f"{str(out_line_type)} type(s) not contained in the pretrained model! "
+ "Please choose another suitable one."
)
if cur_type_map != pretrained_type_map:
log.info(
diff --git a/deepmd/utils/multi_init.py b/deepmd/utils/multi_init.py
index cbd920074f..fd56f715c5 100644
--- a/deepmd/utils/multi_init.py
+++ b/deepmd/utils/multi_init.py
@@ -54,8 +54,8 @@ def replace_model_params_with_frz_multi_model(
if i not in pretrained_type_map:
out_line_type.append(i)
assert not out_line_type, (
- "{} type(s) not contained in the pretrained model! "
- "Please choose another suitable one.".format(str(out_line_type))
+ f"{str(out_line_type)} type(s) not contained in the pretrained model! "
+ "Please choose another suitable one."
)
if cur_type_map != pretrained_type_map:
log.info(
diff --git a/deepmd_cli/main.py b/deepmd_cli/main.py
index f4c28887bd..a6b293020b 100644
--- a/deepmd_cli/main.py
+++ b/deepmd_cli/main.py
@@ -274,7 +274,11 @@ def main_parser() -> argparse.ArgumentParser:
"-S", "--set-prefix", default="set", type=str, help="The set prefix"
)
parser_tst.add_argument(
- "-n", "--numb-test", default=100, type=int, help="The number of data for test"
+ "-n",
+ "--numb-test",
+ default=0,
+ type=int,
+ help="The number of data for test. 0 means all data.",
)
parser_tst.add_argument(
"-r", "--rand-seed", type=int, default=None, help="The random seed"
@@ -438,6 +442,12 @@ def main_parser() -> argparse.ArgumentParser:
type=int,
help="The trajectory frequency of the system",
)
+ parser_model_devi.add_argument(
+ "--real_error",
+ action="store_true",
+ default=False,
+ help="Calculate the RMS real error of the model. The real data should be given in the systems.",
+ )
# * convert models
parser_transform = subparsers.add_parser(
diff --git a/doc/_static/logo-dark.svg b/doc/_static/logo-dark.svg
index 471c010a57..3e48cd9341 100644
--- a/doc/_static/logo-dark.svg
+++ b/doc/_static/logo-dark.svg
@@ -1 +1 @@
-
+
diff --git a/doc/_static/logo.svg b/doc/_static/logo.svg
index 59e236df22..f0459c3d23 100644
--- a/doc/_static/logo.svg
+++ b/doc/_static/logo.svg
@@ -1 +1 @@
-
+
diff --git a/doc/credits.rst b/doc/credits.rst
index 5ca03bf68a..fad06e63ba 100644
--- a/doc/credits.rst
+++ b/doc/credits.rst
@@ -42,7 +42,7 @@ Cite DeePMD-kit and methods
Wang_NuclFusion_2022_v62_p126013
-- If attention-based descriptor (`se_atten`) is used,
+- If an attention-based descriptor (`se_atten`, `se_atten_v2`) is used,
.. bibliography::
:filter: False
diff --git a/doc/development/create-a-model.md b/doc/development/create-a-model.md
index 69a93973aa..6634403021 100644
--- a/doc/development/create-a-model.md
+++ b/doc/development/create-a-model.md
@@ -16,6 +16,7 @@ After implementation, you need to register the component with a key:
```py
from deepmd.descriptor import Descriptor
+
@Descriptor.register("some_descrpt")
class SomeDescript(Descriptor):
def __init__(self, arg1: bool, arg2: float) -> None:
@@ -32,6 +33,7 @@ from typing import List
from dargs import Argument
from deepmd.utils.argcheck import descrpt_args_plugin
+
@descrpt_args_plugin.register("some_descrpt")
def descrpt_some_args() -> List[Argument]:
return [
@@ -57,11 +59,13 @@ The arguments here should be consistent with the class arguments of your new com
You may use `setuptools` to package new codes into a new Python package. It's crucial to add your new component to `entry_points['deepmd']` in `setup.py`:
```py
- entry_points={
- 'deepmd': [
- 'some_descrpt=deepmd_some_descrtpt:SomeDescript',
+entry_points = (
+ {
+ "deepmd": [
+ "some_descrpt=deepmd_some_descrtpt:SomeDescript",
],
},
+)
```
where `deepmd_some_descrtpt` is the module of your codes. It is equivalent to `from deepmd_some_descrtpt import SomeDescript`.
diff --git a/doc/freeze/compress.md b/doc/freeze/compress.md
index 6d96c7f01c..696d1377bf 100644
--- a/doc/freeze/compress.md
+++ b/doc/freeze/compress.md
@@ -82,7 +82,7 @@ The model compression interface requires the version of DeePMD-kit used in the o
**Acceptable descriptor type**
-Descriptors with `se_e2_a`, `se_e3`, `se_e2_r` and `se_atten` types are supported by the model compression feature. `Hybrid` mixed with the above descriptors is also supported.
+Descriptors with `se_e2_a`, `se_e3`, `se_e2_r` and `se_atten_v2` types are supported by the model compression feature. `Hybrid` mixed with the above descriptors is also supported.
**Available activation functions for descriptor:**
diff --git a/doc/model/dprc.md b/doc/model/dprc.md
index b665facace..9984ed9b1d 100644
--- a/doc/model/dprc.md
+++ b/doc/model/dprc.md
@@ -57,10 +57,23 @@ As described in the paper, the DPRc model only corrects $E_\text{QM}$ and $E_\te
{ref}`exclude_types ` can be generated by the following Python script:
```py
from itertools import combinations_with_replacement, product
+
qm = (0, 1, 3, 5)
mm = (2, 4)
-print("QM/QM:", list(map(list, list(combinations_with_replacement(mm, 2)) + list(product(qm, mm)))))
-print("QM/MM:", list(map(list, list(combinations_with_replacement(qm, 2)) + list(combinations_with_replacement(mm, 2)))))
+print(
+ "QM/QM:",
+ list(map(list, list(combinations_with_replacement(mm, 2)) + list(product(qm, mm)))),
+)
+print(
+ "QM/MM:",
+ list(
+ map(
+ list,
+ list(combinations_with_replacement(qm, 2))
+ + list(combinations_with_replacement(mm, 2)),
+ )
+ ),
+)
```
Also, DPRc assumes MM atom energies ({ref}`atom_ener `) are zero:
@@ -112,8 +125,7 @@ It is noted that the [`se_atten` descriptor](./train-se-atten.md) should be used
},
"qm_model": {
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 24,
"rcut_smth": 0.50,
"rcut": 9.00,
@@ -150,8 +162,7 @@ It is noted that the [`se_atten` descriptor](./train-se-atten.md) should be used
},
"qmmm_model": {
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 27,
"rcut_smth": 0.50,
"rcut": 6.00,
diff --git a/doc/model/index.md b/doc/model/index.md
index c9c25c9812..d649df1442 100644
--- a/doc/model/index.md
+++ b/doc/model/index.md
@@ -5,6 +5,7 @@
- [Descriptor `"se_e2_r"`](train-se-e2-r.md)
- [Descriptor `"se_e3"`](train-se-e3.md)
- [Descriptor `"se_atten"`](train-se-atten.md)
+- [Descriptor `"se_atten_v2"`](train-se-atten.md#descriptor-se_atten_v2)
- [Descriptor `"se_a_mask"`](train-se-a-mask.md)
- [Descriptor `"hybrid"`](train-hybrid.md)
- [Descriptor `sel`](sel.md)
diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md
index 8b006346a9..55bb0458f7 100644
--- a/doc/model/train-se-atten.md
+++ b/doc/model/train-se-atten.md
@@ -55,6 +55,16 @@ An example of the DPA-1 descriptor is provided as follows
* {ref}`attn_mask ` determines whether to mask the diagonal in the attention weights and False is recommended.
* {ref}`attn_dotr ` determines whether to dot the relative coordinates on the attention weights as a gated scheme, True is recommended.
+### Descriptor `"se_atten_v2"`
+We highly recommend using version 2.0 of the attention-based descriptor `"se_atten_v2"`, which inherits from `"se_atten"` with the following parameter modifications:
+```json
+ "stripped_type_embedding": true,
+ "smooth_type_embdding": true,
+ "set_davg_zero": false
+```
+Practical evidence demonstrates that `"se_atten_v2"` offers better and more stable performance compared to `"se_atten"`.
+
+
### Fitting `"ener"`
DPA-1 only supports `"ener"` fitting type, and you can refer [here](train-energy.md) for detailed information.
diff --git a/doc/nvnmd/bohrium_download.png b/doc/nvnmd/bohrium_download.png
index 73b8a133ae..efee2cfd41 100644
Binary files a/doc/nvnmd/bohrium_download.png and b/doc/nvnmd/bohrium_download.png differ
diff --git a/doc/nvnmd/bohrium_group.png b/doc/nvnmd/bohrium_group.png
index a14b0563ba..6c5e71576a 100644
Binary files a/doc/nvnmd/bohrium_group.png and b/doc/nvnmd/bohrium_group.png differ
diff --git a/doc/nvnmd/bohrium_job.png b/doc/nvnmd/bohrium_job.png
index 598297056b..040d299525 100644
Binary files a/doc/nvnmd/bohrium_job.png and b/doc/nvnmd/bohrium_job.png differ
diff --git a/doc/nvnmd/bohrium_project.png b/doc/nvnmd/bohrium_project.png
index 28f349cdb3..776b82ab00 100644
Binary files a/doc/nvnmd/bohrium_project.png and b/doc/nvnmd/bohrium_project.png differ
diff --git a/doc/nvnmd/bohrium_status.png b/doc/nvnmd/bohrium_status.png
index 642769a960..19e14118b7 100644
Binary files a/doc/nvnmd/bohrium_status.png and b/doc/nvnmd/bohrium_status.png differ
diff --git a/doc/nvnmd/bohrium_submit.png b/doc/nvnmd/bohrium_submit.png
index 092b03a068..ba71a47155 100644
Binary files a/doc/nvnmd/bohrium_submit.png and b/doc/nvnmd/bohrium_submit.png differ
diff --git a/doc/nvnmd/bohrium_top-up.png b/doc/nvnmd/bohrium_top-up.png
index ce97717fb2..747fea9e5f 100644
Binary files a/doc/nvnmd/bohrium_top-up.png and b/doc/nvnmd/bohrium_top-up.png differ
diff --git a/doc/third-party/lammps-command.md b/doc/third-party/lammps-command.md
index 693fc71306..15acb2e497 100644
--- a/doc/third-party/lammps-command.md
+++ b/doc/third-party/lammps-command.md
@@ -85,6 +85,7 @@ If the keyword `ttm` is set, electronic temperatures from [fix ttm command](http
Only a single `pair_coeff` command is used with the deepmd style which specifies atom names. These are mapped to LAMMPS atom types (integers from 1 to Ntypes) by specifying Ntypes additional arguments after `* *` in the `pair_coeff` command.
If atom names are not set in the `pair_coeff` command, the training parameter {ref}`type_map ` will be used by default.
+If a mapping value is specified as `NULL`, the mapping is not performed. This can be used when a deepmd potential is used as part of the hybrid pair style. The `NULL` values are placeholders for atom types that will be used with other potentials.
If the training parameter {ref}`type_map ` is not set, atom names in the `pair_coeff` command cannot be set. In this case, atom type indexes in [`type.raw`](../data/system.md) (integers from 0 to Ntypes-1) will map to LAMMPS atom types.
Spin is specified by keywords `virtual_len` and `spin_norm`. If the keyword `virtual_len` is set, the distance between virtual atom and its corresponding real atom for each type of magnetic atoms will be fed to the model as the spin parameters. If the keyword `spin_norm` is set, the magnitude of the magnetic moment for each type of magnetic atoms will be fed to the model as the spin parameters.
diff --git a/doc/third-party/out-of-deepmd-kit.md b/doc/third-party/out-of-deepmd-kit.md
index 6cd5769fbf..3d46b53578 100644
--- a/doc/third-party/out-of-deepmd-kit.md
+++ b/doc/third-party/out-of-deepmd-kit.md
@@ -1,35 +1,36 @@
-# Interfaces out of DeePMD-kit
-
-The codes of the following interfaces are not a part of the DeePMD-kit package and maintained by other repositories. We list these interfaces here for user convenience.
-
-## dpdata
-
-[dpdata](https://github.com/deepmodeling/dpdata) provides the `predict` method for `System` class:
-
-```py
-import dpdata
-dsys = dpdata.LabeledSystem('OUTCAR')
-dp_sys = dsys.predict("frozen_model_compressed.pb")
-```
-
-By inferring with the DP model `frozen_model_compressed.pb`, dpdata will generate a new labeled system `dp_sys` with inferred energies, forces, and virials.
-
-## OpenMM plugin for DeePMD-kit
-
-An [OpenMM](https://github.com/openmm/openmm) plugin is provided from [JingHuangLab/openmm_deepmd_plugin](https://github.com/JingHuangLab/openmm_deepmd_plugin), written by the [Huang Lab](http://www.compbiophysics.org/) at Westlake University.
-
-## AMBER interface to DeePMD-kit
-
-An [AMBER](https://ambermd.org/) interface to DeePMD-kit is written by the [York [Lab](https://theory.rutgers.edu/) from Rutgers University. It is open-source at [GitLab RutgersLBSR/AmberDPRc](https://gitlab.com/RutgersLBSR/AmberDPRc/). Details can be found in [this paper](https://doi.org/10.1021/acs.jctc.1c00201).
-
-## DP-GEN
-
-[DP-GEN](https://github.com/deepmodeling/dpgen) provides a workflow to generate accurate DP models by calling DeePMD-kit's command line interface (CLI) in the local or remote server. Details can be found in [this paper](https://doi.org/10.1016/j.cpc.2020.107206).
-
-## MLatom
-
-[Mlatom](http://mlatom.com/) provides an interface to the DeePMD-kit within MLatom's workflow by calling DeePMD-kit's CLI. Details can be found in [this paper](https://doi.org/10.1007/s41061-021-00339-5).
-
-## ABACUS
-
-[ABACUS](https://github.com/deepmodeling/abacus-develop/) can run molecular dynamics with a DP model. User is required to [build ABACUS with DeePMD-kit](https://abacus.deepmodeling.com/en/latest/advanced/install.html#build-with-deepmd-kit).
+# Interfaces out of DeePMD-kit
+
+The code of the following interfaces is not part of the DeePMD-kit package and is maintained by other repositories. We list these interfaces here for user convenience.
+
+## dpdata
+
+[dpdata](https://github.com/deepmodeling/dpdata) provides the `predict` method for `System` class:
+
+```py
+import dpdata
+
+dsys = dpdata.LabeledSystem("OUTCAR")
+dp_sys = dsys.predict("frozen_model_compressed.pb")
+```
+
+By inferring with the DP model `frozen_model_compressed.pb`, dpdata will generate a new labeled system `dp_sys` with inferred energies, forces, and virials.
+
+## OpenMM plugin for DeePMD-kit
+
+An [OpenMM](https://github.com/openmm/openmm) plugin is provided from [JingHuangLab/openmm_deepmd_plugin](https://github.com/JingHuangLab/openmm_deepmd_plugin), written by the [Huang Lab](http://www.compbiophysics.org/) at Westlake University.
+
+## AMBER interface to DeePMD-kit
+
+An [AMBER](https://ambermd.org/) interface to DeePMD-kit is written by the [York Lab](https://theory.rutgers.edu/) from Rutgers University. It is open-source at [GitLab RutgersLBSR/AmberDPRc](https://gitlab.com/RutgersLBSR/AmberDPRc/). Details can be found in [this paper](https://doi.org/10.1021/acs.jctc.1c00201).
+
+## DP-GEN
+
+[DP-GEN](https://github.com/deepmodeling/dpgen) provides a workflow to generate accurate DP models by calling DeePMD-kit's command line interface (CLI) on a local or remote server. Details can be found in [this paper](https://doi.org/10.1016/j.cpc.2020.107206).
+
+## MLatom
+
+[MLatom](http://mlatom.com/) provides an interface to DeePMD-kit within MLatom's workflow by calling DeePMD-kit's CLI. Details can be found in [this paper](https://doi.org/10.1007/s41061-021-00339-5).
+
+## ABACUS
+
+[ABACUS](https://github.com/deepmodeling/abacus-develop/) can run molecular dynamics with a DP model. Users are required to [build ABACUS with DeePMD-kit](https://abacus.deepmodeling.com/en/latest/advanced/install.html#build-with-deepmd-kit).
diff --git a/doc/train/multi-task-training.md b/doc/train/multi-task-training.md
index 4549eeab77..c3cbe98c83 100644
--- a/doc/train/multi-task-training.md
+++ b/doc/train/multi-task-training.md
@@ -27,6 +27,7 @@ The supported descriptors for multi-task mode are listed:
- {ref}`se_r (se_e2_r) `
- {ref}`se_at (se_e3) `
- {ref}`se_atten `
+- {ref}`se_atten_v2 `
- {ref}`hybrid `
The supported fitting nets for multi-task mode are listed:
diff --git a/doc/train/training.md b/doc/train/training.md
index a4afda73c2..c1e5b89a84 100644
--- a/doc/train/training.md
+++ b/doc/train/training.md
@@ -46,12 +46,12 @@ import matplotlib.pyplot as plt
data = np.genfromtxt("lcurve.out", names=True)
for name in data.dtype.names[1:-1]:
- plt.plot(data['step'], data[name], label=name)
+ plt.plot(data["step"], data[name], label=name)
plt.legend()
-plt.xlabel('Step')
-plt.ylabel('Loss')
-plt.xscale('symlog')
-plt.yscale('log')
+plt.xlabel("Step")
+plt.ylabel("Loss")
+plt.xscale("symlog")
+plt.yscale("log")
plt.grid()
plt.show()
```
diff --git a/examples/dos/train/input.json b/examples/dos/train/input.json
index 3094814055..f2094c18a6 100644
--- a/examples/dos/train/input.json
+++ b/examples/dos/train/input.json
@@ -69,5 +69,5 @@
"batch_size": 1
}
},
- "_comment": "that's all"
+ "_comment1": "that's all"
}
diff --git a/examples/dprc/generalized_force/input.json b/examples/dprc/generalized_force/input.json
index 01f2a52a6d..80a9ebeb1b 100644
--- a/examples/dprc/generalized_force/input.json
+++ b/examples/dprc/generalized_force/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"C",
@@ -87,8 +87,7 @@
"seed": 1
},
{
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 150,
"rcut_smth": 5.80,
"rcut": 6.00,
@@ -185,7 +184,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment2": "that's all"
},
"loss": {
@@ -199,7 +198,7 @@
"start_pref_gf": 10,
"limit_pref_gf": 100,
"numb_generalized_coord": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
"training": {
@@ -208,15 +207,15 @@
"../data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment4": "that's all"
},
"numb_steps": 10000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
- "_comment": "that's all"
+ "_comment6": "that's all"
}
diff --git a/examples/dprc/normal/input.json b/examples/dprc/normal/input.json
index d35b48d12b..eba044fd43 100644
--- a/examples/dprc/normal/input.json
+++ b/examples/dprc/normal/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"C",
@@ -87,8 +87,7 @@
"seed": 1
},
{
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 150,
"rcut_smth": 5.80,
"rcut": 6.00,
@@ -185,7 +184,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment2": "that's all"
},
"loss": {
@@ -196,7 +195,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
"training": {
@@ -205,15 +204,15 @@
"../data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment4": "that's all"
},
"numb_steps": 1000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
- "_comment": "that's all"
+ "_comment6": "that's all"
}
diff --git a/examples/dprc/pairwise/input.json b/examples/dprc/pairwise/input.json
index 675418f7d5..c9ebae0f43 100644
--- a/examples/dprc/pairwise/input.json
+++ b/examples/dprc/pairwise/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type": "pairwise_dprc",
"type_map": [
@@ -18,8 +18,7 @@
},
"qm_model": {
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 24,
"rcut_smth": 0.50,
"rcut": 9.00,
@@ -56,8 +55,7 @@
},
"qmmm_model": {
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 27,
"rcut_smth": 0.50,
"rcut": 6.00,
@@ -153,7 +151,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment2": "that's all"
},
"loss": {
@@ -164,7 +162,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
"training": {
@@ -173,15 +171,15 @@
"../data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment4": "that's all"
},
"numb_steps": 1000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
- "_comment": "that's all"
+ "_comment6": "that's all"
}
diff --git a/examples/fparam/train/input.json b/examples/fparam/train/input.json
index 1659bea1ab..a81051f459 100644
--- a/examples/fparam/train/input.json
+++ b/examples/fparam/train/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"data_stat_nbatch": 1,
"descriptor": {
@@ -47,7 +47,7 @@
"decay_steps": 5000
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -61,8 +61,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
@@ -73,5 +73,5 @@
"profiling_file": "timeline.json"
},
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/examples/fparam/train/input_aparam.json b/examples/fparam/train/input_aparam.json
index 3e251f68df..fdc53706b9 100644
--- a/examples/fparam/train/input_aparam.json
+++ b/examples/fparam/train/input_aparam.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"data_stat_nbatch": 1,
"descriptor": {
@@ -47,7 +47,7 @@
"decay_steps": 5000
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -61,8 +61,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
@@ -73,5 +73,5 @@
"profiling_file": "timeline.json"
},
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/examples/nopbc/mixed/input.json b/examples/nopbc/mixed/input.json
index b905318b74..0e6fbfef57 100644
--- a/examples/nopbc/mixed/input.json
+++ b/examples/nopbc/mixed/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"C",
@@ -20,7 +20,7 @@
"axis_neuron": 12,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -31,9 +31,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -41,7 +41,7 @@
"decay_steps": 4000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -52,22 +52,22 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
"training_data": {
"systems": "../data/",
"batch_size": "mixed:4",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"numb_steps": 4000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
- "_comment": "that's all"
+ "_comment9": "that's all"
}
diff --git a/examples/nopbc/train/input.json b/examples/nopbc/train/input.json
index 43669ad45c..2c33791d45 100644
--- a/examples/nopbc/train/input.json
+++ b/examples/nopbc/train/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"C",
@@ -23,7 +23,7 @@
"resnet_dt": false,
"axis_neuron": 12,
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -33,9 +33,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -43,7 +43,7 @@
"decay_steps": 4000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -54,22 +54,22 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
"training_data": {
"systems": "../data/",
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"numb_steps": 4000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
- "_comment": "that's all"
+ "_comment9": "that's all"
}
diff --git a/examples/spin/se_e2_a/input.json b/examples/spin/se_e2_a/input.json
index 09d95a913b..f9e0988163 100644
--- a/examples/spin/se_e2_a/input.json
+++ b/examples/spin/se_e2_a/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"Ni",
@@ -22,7 +22,7 @@
"axis_neuron": 16,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -33,7 +33,7 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
"spin": {
"use_spin": [
@@ -46,9 +46,9 @@
"spin_norm": [
1.2737
],
- "_comment": " that's all"
+ "_comment4": " that's all"
},
- "_comment": " that's all"
+ "_comment5": " that's all"
},
"learning_rate": {
@@ -56,7 +56,7 @@
"decay_steps": 10000,
"start_lr": 1.00e-3,
"stop_lr": 5.92e-6,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"loss": {
@@ -69,7 +69,7 @@
"limit_pref_fm": 10.0,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment7": " that's all"
},
"training": {
@@ -78,7 +78,7 @@
"../data/data_0/"
],
"batch_size": 1,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"validation_data": {
"systems": [
@@ -86,15 +86,15 @@
],
"batch_size": 1,
"numb_btch": 10,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"numb_steps": 1000000,
"seed": 1,
"disp_file": "lcurve.out",
"disp_freq": 5000,
"save_freq": 10000,
- "_comment": "that's all"
+ "_comment10": "that's all"
},
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/examples/water/dplr/train/dw.json b/examples/water/dplr/train/dw.json
index b3f5d87f8d..401e6272f5 100644
--- a/examples/water/dplr/train/dw.json
+++ b/examples/water/dplr/train/dw.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -22,7 +22,7 @@
"axis_neuron": 8,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"type": "dipole",
@@ -37,9 +37,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -47,17 +47,17 @@
"start_lr": 0.01,
"stop_lr": 1e-7,
"decay_steps": 5000,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
"type": "tensor",
"pref": 0.0,
"pref_atomic": 1.0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
- "_comment": " traing controls",
+ "_comment7": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -65,22 +65,22 @@
],
"batch_size": "auto",
"set_prefix": "set",
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 2000,
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment9": " display and restart",
+ "_comment10": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 500,
"save_ckpt": "model.ckpt",
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment11": "that's all"
},
- "_comment": "that's all"
+ "_comment12": "that's all"
}
diff --git a/examples/water/dplr/train/ener.json b/examples/water/dplr/train/ener.json
index 3ff59a73b4..7b47bfda55 100644
--- a/examples/water/dplr/train/ener.json
+++ b/examples/water/dplr/train/ener.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -22,7 +22,7 @@
"axis_neuron": 8,
"precision": "float64",
"seed": 3458359619,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -33,7 +33,7 @@
"resnet_dt": true,
"precision": "float64",
"seed": 108835393,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
"modifier": {
"type": "dipole_charge",
@@ -48,7 +48,7 @@
"ewald_h": 1.00,
"ewald_beta": 0.40
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -56,7 +56,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 1.0e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -67,7 +67,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -76,15 +76,15 @@
"data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"numb_steps": 2000,
"seed": 4266336533,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 500,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
- "_comment": "that's all"
+ "_comment9": "that's all"
}
diff --git a/examples/water/hybrid/input.json b/examples/water/hybrid/input.json
index 7bf6bdee0d..2315d26444 100644
--- a/examples/water/hybrid/input.json
+++ b/examples/water/hybrid/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -25,7 +25,7 @@
"axis_neuron": 4,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
{
"type": "se_e2_r",
@@ -43,7 +43,7 @@
"resnet_dt": false,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
}
]
},
@@ -55,9 +55,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment4": " that's all"
},
- "_comment": " that's all"
+ "_comment5": " that's all"
},
"learning_rate": {
@@ -65,7 +65,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment6": "that's all"
},
"loss": {
@@ -76,7 +76,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment7": " that's all"
},
"training": {
@@ -87,7 +87,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"validation_data": {
"systems": [
@@ -95,15 +95,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment10": "that's all"
},
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/examples/water/se_atten/input.json b/examples/water/se_atten/input.json
index fc29807666..bfd6d48c60 100644
--- a/examples/water/se_atten/input.json
+++ b/examples/water/se_atten/input.json
@@ -1,13 +1,12 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
"H"
],
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 120,
"rcut_smth": 0.50,
"rcut": 6.00,
@@ -24,7 +23,7 @@
"attn_dotr": true,
"attn_mask": false,
"precision": "float64",
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -35,9 +34,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -45,7 +44,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -56,7 +55,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -67,7 +66,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -75,15 +74,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water/se_atten_compressible/input.json b/examples/water/se_atten_compressible/input.json
index b98b622bca..e8d3f7ea32 100644
--- a/examples/water/se_atten_compressible/input.json
+++ b/examples/water/se_atten_compressible/input.json
@@ -1,13 +1,12 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
"H"
],
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 120,
"rcut_smth": 0.50,
"rcut": 6.00,
@@ -24,7 +23,7 @@
"attn_dotr": true,
"attn_mask": false,
"precision": "float64",
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -35,9 +34,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -45,7 +44,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -56,7 +55,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -67,7 +66,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -75,15 +74,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water/se_atten_dpa1_compat/input.json b/examples/water/se_atten_dpa1_compat/input.json
index 8bd24df258..90c597e586 100644
--- a/examples/water/se_atten_dpa1_compat/input.json
+++ b/examples/water/se_atten_dpa1_compat/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -24,7 +24,7 @@
"attn_dotr": true,
"attn_mask": false,
"precision": "float64",
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -35,9 +35,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -45,7 +45,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -56,7 +56,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -67,7 +67,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -75,15 +75,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water/se_e2_a/input.json b/examples/water/se_e2_a/input.json
index 42200f1162..46c38ba834 100644
--- a/examples/water/se_e2_a/input.json
+++ b/examples/water/se_e2_a/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -22,7 +22,7 @@
"axis_neuron": 16,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -33,9 +33,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -43,7 +43,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -54,7 +54,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -65,7 +65,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -73,15 +73,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water/se_e2_a_mixed_prec/input.json b/examples/water/se_e2_a_mixed_prec/input.json
index e7ded27b76..0382b80b30 100644
--- a/examples/water/se_e2_a_mixed_prec/input.json
+++ b/examples/water/se_e2_a_mixed_prec/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -21,7 +21,7 @@
"resnet_dt": false,
"axis_neuron": 16,
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -31,9 +31,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -41,7 +41,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -52,7 +52,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -63,7 +63,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -71,7 +71,7 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"mixed_precision": {
"compute_prec": "float16",
@@ -82,8 +82,8 @@
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water/se_e2_a_tebd/input.json b/examples/water/se_e2_a_tebd/input.json
index 0d7e1a6a3e..101c1a7a4f 100644
--- a/examples/water/se_e2_a_tebd/input.json
+++ b/examples/water/se_e2_a_tebd/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -33,7 +33,7 @@
"type_one_side": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -44,9 +44,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -54,7 +54,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -65,7 +65,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -76,7 +76,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -84,15 +84,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water/se_e2_r/input.json b/examples/water/se_e2_r/input.json
index 41cccb4073..7fdd1835c6 100644
--- a/examples/water/se_e2_r/input.json
+++ b/examples/water/se_e2_r/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -21,7 +21,7 @@
"resnet_dt": false,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -32,9 +32,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": "that's all"
+ "_comment3": "that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -42,7 +42,7 @@
"decay_steps": 5000,
"start_lr": 0.005,
"stop_lr": 3.51e-8,
- "_comment": " that's all"
+ "_comment5": " that's all"
},
"loss": {
@@ -52,10 +52,10 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
- "_comment": " traing controls",
+ "_comment7": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -64,7 +64,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"validation_data": {
"systems": [
@@ -72,15 +72,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"numb_steps": 1000000,
"seed": 1,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment10": "that's all"
},
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/examples/water/se_e3/input.json b/examples/water/se_e3/input.json
index d96b07820d..df8dacfe9c 100644
--- a/examples/water/se_e3/input.json
+++ b/examples/water/se_e3/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -18,7 +18,7 @@
"resnet_dt": false,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -29,9 +29,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -39,7 +39,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -49,10 +49,10 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
- "_comment": " traing controls",
+ "_comment7": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -61,7 +61,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"validation_data": {
"systems": [
@@ -69,13 +69,13 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"numb_steps": 1000000,
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment10": " display and restart",
+ "_comment11": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 10,
"save_freq": 1000,
@@ -84,8 +84,8 @@
"time_training": true,
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment12": "that's all"
},
- "_comment": "that's all"
+ "_comment13": "that's all"
}
diff --git a/examples/water/zbl/input.json b/examples/water/zbl/input.json
index ceb00b448c..180a6cc8b5 100644
--- a/examples/water/zbl/input.json
+++ b/examples/water/zbl/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"use_srtab": "H2O_tab_potential.txt",
"smin_alpha": 0.1,
@@ -26,7 +26,7 @@
"axis_neuron": 16,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -37,9 +37,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -47,7 +47,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -58,7 +58,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -69,7 +69,7 @@
"../data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -77,15 +77,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1000000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/examples/water_multi_task/ener_dipole/input.json b/examples/water_multi_task/ener_dipole/input.json
index c2aea23e13..9d00adac2e 100644
--- a/examples/water_multi_task/ener_dipole/input.json
+++ b/examples/water_multi_task/ener_dipole/input.json
@@ -1,5 +1,5 @@
{
- "_comment": "that's all",
+ "_comment1": "that's all",
"model": {
"type_map": [
"O",
@@ -22,7 +22,7 @@
"axis_neuron": 16,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net_dict": {
"water_dipole": {
@@ -38,7 +38,7 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
"water_ener": {
"neuron": [
@@ -49,24 +49,24 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment4": " that's all"
}
},
- "_comment": " that's all"
+ "_comment5": " that's all"
},
"learning_rate": {
"type": "exp",
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-08,
- "_comment": "that's all"
+ "_comment6": "that's all"
},
"loss_dict": {
"water_dipole": {
"type": "tensor",
"pref": 1.0,
"pref_atomic": 1.0,
- "_comment": " that's all"
+ "_comment7": " that's all"
},
"water_ener": {
"type": "ener",
@@ -76,7 +76,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment8": " that's all"
}
},
"training": {
@@ -88,7 +88,7 @@
"../../water_tensor/dipole/training_data/global_system"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"validation_data": {
"systems": [
@@ -97,7 +97,7 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment10": "that's all"
}
},
"water_ener": {
@@ -108,7 +108,7 @@
"../../water/data/data_2/"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment11": "that's all"
},
"validation_data": {
"systems": [
@@ -116,7 +116,7 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment12": "that's all"
}
}
},
@@ -129,6 +129,6 @@
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment13": "that's all"
}
}
diff --git a/examples/water_tensor/dipole/dipole_input.json b/examples/water_tensor/dipole/dipole_input.json
index f849279232..b42b9b8465 100644
--- a/examples/water_tensor/dipole/dipole_input.json
+++ b/examples/water_tensor/dipole/dipole_input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -22,7 +22,7 @@
"axis_neuron": 6,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"type": "dipole",
@@ -37,23 +37,23 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
"type": "exp",
"start_lr": 0.01,
"decay_steps": 5000,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
"type": "tensor",
"pref": 1.0,
"pref_atomic": 1.0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
- "_comment": " traing controls",
+ "_comment7": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -61,7 +61,7 @@
"./training_data/global_system"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"validation_data": {
"systems": [
@@ -70,14 +70,14 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"numb_steps": 2000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment10": "that's all"
},
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/examples/water_tensor/polar/polar_input.json b/examples/water_tensor/polar/polar_input.json
index 4a031f24c4..ca53182e79 100644
--- a/examples/water_tensor/polar/polar_input.json
+++ b/examples/water_tensor/polar/polar_input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -23,7 +23,7 @@
"axis_neuron": 16,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"type": "polar",
@@ -39,9 +39,9 @@
"resnet_dt": true,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -49,16 +49,16 @@
"decay_steps": 5000,
"start_lr": 0.01,
"stop_lr": 3.51e-7,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
"type": "tensor",
"pref_atomic": 1.0,
"pref": 1.0,
- "_comment": "that's all"
+ "_comment6": "that's all"
},
- "_comment": " traing controls",
+ "_comment7": " traing controls",
"training": {
"training_data": {
"systems": [
@@ -66,7 +66,7 @@
"./training_data/global_system"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"validation_data": {
"systems": [
@@ -75,15 +75,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
"numb_steps": 2000,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 100,
"save_freq": 1000,
- "_comment": "that's all"
+ "_comment10": "that's all"
},
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/examples/zinc_protein/zinc_se_a_mask.json b/examples/zinc_protein/zinc_se_a_mask.json
index 013c24a239..b23987cf5d 100644
--- a/examples/zinc_protein/zinc_se_a_mask.json
+++ b/examples/zinc_protein/zinc_se_a_mask.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"C",
@@ -29,7 +29,7 @@
"axis_neuron": 16,
"precision": "float64",
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -42,9 +42,9 @@
"seed": 1,
"numb_aparam": 1,
"use_aparam_as_mask": true,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -52,7 +52,7 @@
"decay_steps": 20000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -63,7 +63,7 @@
"limit_pref_f": 0.0,
"start_pref_pf": 1.0,
"limit_pref_pf": 1.0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
"training_data": {
@@ -71,14 +71,14 @@
"example/zinc_protein/train_data_dp_mask/"
],
"batch_size": 2,
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
"example/zinc_protein/val_data_dp_mask/"
],
"batch_size": 2,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1,
"seed": 10,
@@ -87,7 +87,7 @@
"tensorboard": true,
"tensorboard_log_dir": "log4tensorboard",
"tensorboard_freq": 100,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/pyproject.toml b/pyproject.toml
index 1f56b08e3e..687e0284cc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,7 +55,7 @@ keywords = ["deepmd"]
[project.urls]
Homepage = "https://github.com/deepmodeling/deepmd-kit"
-documentation = "https://docs.deepmodeling.com/projects/deepmd-kit"
+documentation = "https://docs.deepmodeling.com/projects/deepmd"
repository = "https://github.com/deepmodeling/deepmd-kit"
[tool.setuptools_scm]
diff --git a/source/install/build_tf.py b/source/install/build_tf.py
index db72bebb8b..043c4c6c81 100755
--- a/source/install/build_tf.py
+++ b/source/install/build_tf.py
@@ -150,9 +150,9 @@ def __call__(self):
self.download()
if not self.exists:
raise RuntimeError(
- "Download {} from {} failed! "
- "You can manually download it to {} and "
- "retry the script.".format(self.filename, self.url, str(self.path))
+ f"Download {self.filename} from {self.url} failed! "
+ f"You can manually download it to {str(self.path)} and "
+ "retry the script."
)
self.post_process()
diff --git a/source/lib/src/cuda/tabulate.cu b/source/lib/src/cuda/tabulate.cu
index bb05fd59a4..92f77ed63b 100644
--- a/source/lib/src/cuda/tabulate.cu
+++ b/source/lib/src/cuda/tabulate.cu
@@ -159,7 +159,7 @@ __global__ void tabulate_fusion_se_a_fifth_order_polynomial(
if (enable_se_atten) {
FPTYPE t = two_embed[block_idx * nnei * last_layer_size +
ii * last_layer_size + thread_idx];
- res = res * t + t;
+ res = res * t + res;
}
for (int kk = 0; kk < MTILE; kk++) {
@@ -239,7 +239,7 @@ __global__ void tabulate_fusion_se_a_grad_fifth_order_polynomial(
if (enable_se_atten) {
t = two_embed[block_idx * nnei * last_layer_size +
ii * last_layer_size + jj];
- res = res * t + t;
+ res = res * t + res;
}
for (int kk = 0; kk < MTILE; kk++) {
@@ -255,7 +255,7 @@ __global__ void tabulate_fusion_se_a_grad_fifth_order_polynomial(
(var[1] + (2 * var[2] +
(3 * var[3] + (4 * var[4] + 5 * var[5] * xx) * xx) * xx) *
xx) *
- (enable_se_atten ? res * t : res);
+ (enable_se_atten ? res * t + res : res);
}
__syncwarp();
for (int kk = 0; kk < MTILE; kk++) {
diff --git a/source/lib/src/rocm/tabulate.hip.cu b/source/lib/src/rocm/tabulate.hip.cu
index 6ae21d4e0b..f88ae6ec4a 100644
--- a/source/lib/src/rocm/tabulate.hip.cu
+++ b/source/lib/src/rocm/tabulate.hip.cu
@@ -125,7 +125,7 @@ __global__ void tabulate_fusion_se_a_fifth_order_polynomial(
if (enable_se_atten) {
FPTYPE t = two_embed[block_idx * nnei * last_layer_size +
ii * last_layer_size + thread_idx];
- res = res * t + t;
+ res = res * t + res;
}
for (int kk = 0; kk < MTILE; kk++) {
@@ -206,7 +206,7 @@ __global__ void tabulate_fusion_se_a_grad_fifth_order_polynomial(
if (enable_se_atten) {
t = two_embed[block_idx * nnei * last_layer_size +
ii * last_layer_size + jj];
- res = res * t + t;
+ res = res * t + res;
}
for (int kk = 0; kk < KTILE; kk++) {
@@ -228,7 +228,7 @@ __global__ void tabulate_fusion_se_a_grad_fifth_order_polynomial(
((FPTYPE)4. * var[4] + (FPTYPE)5. * var[5] * xx) * xx) *
xx) *
xx) *
- (enable_se_atten ? res * t : res);
+ (enable_se_atten ? res * t + res : res);
}
//__syncwarp();->syncwrap
__syncthreads();
diff --git a/source/lib/src/tabulate.cc b/source/lib/src/tabulate.cc
index 377e6d06db..1cafd36ee2 100644
--- a/source/lib/src/tabulate.cc
+++ b/source/lib/src/tabulate.cc
@@ -125,7 +125,7 @@ void deepmd::tabulate_fusion_se_a_cpu(FPTYPE* out,
if (enable_se_atten) {
FPTYPE t = two_embed[ii * nnei * last_layer_size +
jj * last_layer_size + kk];
- var = var * t + t;
+ var = var * t + var;
}
if (unloop) {
@@ -215,8 +215,8 @@ void deepmd::tabulate_fusion_se_a_grad_cpu(FPTYPE* dy_dem_x,
if (enable_se_atten) {
FPTYPE t = two_embed[ii * nnei * last_layer_size +
jj * last_layer_size + kk];
- res = res * t + t;
- g = t * g;
+ res = res * t + res;
+ g += t * g;
}
if (unloop) {
diff --git a/source/lib/tests/test_tabulate_se_a.cc b/source/lib/tests/test_tabulate_se_a.cc
index 812f141c53..4c87a24566 100644
--- a/source/lib/tests/test_tabulate_se_a.cc
+++ b/source/lib/tests/test_tabulate_se_a.cc
@@ -649,47 +649,47 @@ class TestTabulateSeA : public ::testing::Test {
0.7906075239905527, 0.9325509002602962, 0.44489583733770977,
0.5194672674960213, 0.04635102497306032};
std::vector expected_xyz_scatter_with_two_embed = {
- 0.52052723, -0.09162471, 0.00134297, 0.36123220, 0.53111037, -0.13234888,
- 0.14547290, -0.07760225, 0.39022224, -0.05084421, 0.00097584, 0.40299126,
- 0.46399123, -0.08226800, 0.10496569, -0.05697074, 0.22831256, -0.03236614,
- 0.00053069, 0.20158495, 0.25562350, -0.05008332, 0.05120581, -0.03847130,
- 0.48372134, -0.08635551, 0.00127416, 0.33553740, 0.48870492, -0.12377265,
- 0.14484348, -0.06894720, 0.09037214, -0.08453476, 0.00132198, 0.48978396,
- 0.56809636, -0.11217096, 0.06433977, -0.14684460, 0.04360697, -0.10371710,
- 0.00105878, 0.36754591, 0.30793639, -0.05068795, 0.10760094, -0.09443281,
- 0.03027943, -0.05996609, 0.00096647, 0.29035073, 0.38261345, -0.05492907,
- 0.06442101, -0.09657691, 0.06211007, -0.06585386, 0.00121839, 0.38826136,
- 0.62888512, -0.12626045, 0.13760707, -0.15409971, 0.35496658, -0.09629124,
- 0.00094936, 0.37091455, 0.40029681, -0.11266203, 0.16359092, -0.09316170,
- 0.46220540, -0.11194036, 0.00099500, 0.48276205, 0.60272823, -0.14764563,
- 0.23083357, -0.14624559, 0.42737683, -0.12200031, 0.00101439, 0.38477701,
- 0.40109773, -0.14840970, 0.18621217, -0.08161032, 0.19972406, -0.03173626,
- 0.00020518, 0.11990507, 0.18933921, -0.05445546, 0.10560781, -0.04502732,
- 0.43067025, -0.06062120, 0.00243902, 0.29049015, 0.50532163, -0.17136189,
- 0.24445441, -0.10595959, 0.24687635, -0.06109584, 0.00155267, 0.31006293,
- 0.46913106, -0.14140409, 0.16395873, -0.04099833, 0.18986012, -0.05425738,
- 0.00104613, 0.27541918, 0.46324668, -0.11304375, 0.14316380, -0.03016295,
- 0.30689686, -0.03886867, 0.00133389, 0.17385121, 0.33724930, -0.13383926,
- 0.18766727, -0.07051321,
+ 0.473477, -0.938671, -0.566066, 0.243465, 0.820256, -1.03738,
+ -0.1701, -0.871079, 0.395671, -0.716406, -0.518235, 0.240325,
+ 0.729525, -0.821716, -0.146507, -0.743918, 0.228466, -0.423251,
+ -0.285282, 0.129922, 0.412289, -0.479284, -0.079999, -0.444168,
+ 0.434929, -0.866715, -0.520729, 0.22332, 0.749817, -0.954691,
+ -0.159364, -0.787858, 0.346693, -0.994381, -0.627092, 0.29814,
+ 0.919532, -1.07189, -0.156985, -1.17893, 0.288148, -0.988518,
+ -0.574846, 0.245017, 0.696643, -0.791873, -0.158166, -0.923282,
+ 0.223783, -0.685911, -0.496191, 0.192425, 0.622787, -0.666019,
+ -0.116214, -0.792771, 0.331461, -0.910623, -0.697545, 0.270823,
+ 0.943644, -1.10325, -0.186431, -1.19188, 0.379218, -0.896241,
+ -0.500721, 0.229773, 0.689497, -0.908129, -0.166598, -0.849655,
+ 0.5138, -1.15374, -0.656861, 0.31042, 0.981434, -1.23231,
+ -0.233144, -1.2304, 0.45798, -1.10749, -0.604885, 0.261653,
+ 0.78797, -1.13627, -0.19813, -0.931011, 0.213153, -0.412743,
+ -0.258851, 0.106663, 0.367201, -0.484, -0.10065, -0.45465,
+ 0.471219, -0.881373, -0.715617, 0.24295, 0.869534, -1.23335,
+ -0.221008, -1.03448, 0.354185, -0.78891, -0.632616, 0.224055,
+ 0.759759, -1.03588, -0.175253, -0.726695, 0.293533, -0.680751,
+ -0.515893, 0.193818, 0.681708, -0.85917, -0.150909, -0.603596,
+ 0.336416, -0.611786, -0.478383, 0.164921, 0.608194, -0.915656,
+ -0.162872, -0.723229,
};
std::vector expected_dy_dem_x_with_two_embed = {
- -0.00876744, -0.05693831, 0.00251697, -0.04104716,
- -0.07540836, -0.01255790, -0.00225420, -0.04262001,
- -0.09707657, -0.00396457, -0.00492498, -0.03225818,
- -0.04350615, -0.02865961, -0.00690901, -0.00847689,
+ -0.0294449, -0.0948144, -0.039285, -0.0826351, -0.114794, -0.0530287,
+ -0.0404511, -0.0964537, -0.148872, -0.0394916, -0.0330467, -0.0767711,
+ -0.0893684, -0.0666033, -0.0360863, -0.0532634,
+
};
std::vector expected_dy_dem_with_two_embed = {
- 1.88405505, 1.88405505, 1.88405505, 1.88405505, 3.33316973, 3.33316973,
- 3.33316973, 3.33316973, 3.48744961, 3.48744961, 3.48744961, 3.48744961,
- 2.66470385, 2.66470385, 2.66470385, 2.66470385, 2.55291404, 2.55291404,
- 2.55291404, 2.55291404, 2.23471214, 2.23471214, 2.23471214, 2.23471214,
- 0.79636719, 0.79636719, 0.79636719, 0.79636719, 1.16809199, 1.16809199,
- 1.16809199, 1.16809199, 2.55088185, 2.55088185, 2.55088185, 2.55088185,
- 3.02601696, 3.02601696, 3.02601696, 3.02601696, 2.80271925, 2.80271925,
- 2.80271925, 2.80271925, 1.67897971, 1.67897971, 1.67897971, 1.67897971,
- 1.20938629, 1.20938629, 1.20938629, 1.20938629, 2.05980066, 2.05980066,
- 2.05980066, 2.05980066, 3.10625208, 3.10625208, 3.10625208, 3.10625208,
- 3.35169350, 3.35169350, 3.35169350, 3.35169350,
+ -3.43749, -3.43749, -3.43749, -3.43749, -4.91768, -4.91768, -4.91768,
+ -4.91768, -3.79784, -3.79784, -3.79784, -3.79784, -5.39484, -5.39484,
+ -5.39484, -5.39484, -4.91866, -4.91866, -4.91866, -4.91866, -5.40551,
+ -5.40551, -5.40551, -5.40551, -5.56471, -5.56471, -5.56471, -5.56471,
+ -4.87029, -4.87029, -4.87029, -4.87029, -5.71263, -5.71263, -5.71263,
+ -5.71263, -4.27996, -4.27996, -4.27996, -4.27996, -5.55454, -5.55454,
+ -5.55454, -5.55454, -5.07277, -5.07277, -5.07277, -5.07277, -5.96781,
+ -5.96781, -5.96781, -5.96781, -5.08156, -5.08156, -5.08156, -5.08156,
+ -5.23843, -5.23843, -5.23843, -5.23843, -3.90654, -3.90654, -3.90654,
+ -3.90654,
+
};
const int nloc = 4;
const int nnei = 4;
diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp
index e60655decc..f0e0f23096 100644
--- a/source/lmp/pair_deepmd.cpp
+++ b/source/lmp/pair_deepmd.cpp
@@ -1153,6 +1153,10 @@ void PairDeepMD::coeff(int narg, char **arg) {
break;
}
}
+ if (!found_element && "NULL" == type_name) {
+ type_idx_map.push_back(type_map.size()); // ghost type
+ found_element = true;
+ }
if (!found_element) {
error->all(FLERR, "pair_coeff: element " + type_name +
" not found in the model");
diff --git a/source/tests/compat_inputs/water_se_a_v0.json b/source/tests/compat_inputs/water_se_a_v0.json
index e187b54eca..be313266e2 100644
--- a/source/tests/compat_inputs/water_se_a_v0.json
+++ b/source/tests/compat_inputs/water_se_a_v0.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"use_smooth": true,
"sel_a": [
46,
@@ -23,7 +23,7 @@
"coord_norm": true,
"type_fitting_net": false,
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"../data/"
],
@@ -45,8 +45,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 10,
@@ -57,5 +57,5 @@
"profiling": true,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/compat_inputs/water_v0.json b/source/tests/compat_inputs/water_v0.json
index 1ed580a2ae..385363ce80 100644
--- a/source/tests/compat_inputs/water_v0.json
+++ b/source/tests/compat_inputs/water_v0.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"use_smooth": false,
"sel_a": [
16,
@@ -24,12 +24,12 @@
1,
0
],
- "_comment": " default rule: []",
- "_comment": " user defined rule: for each type provides two axes, ",
- "_comment": " for each axis: (a_or_r, type, idx)",
- "_comment": " if type < 0, exclude type -(type+1)",
- "_comment": " for water (O:0, H:1) it can be",
- "_comment": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]",
+ "_comment2": " default rule: []",
+ "_comment3": " user defined rule: for each type provides two axes, ",
+ "_comment4": " for each axis: (a_or_r, type, idx)",
+ "_comment5": " if type < 0, exclude type -(type+1)",
+ "_comment6": " for water (O:0, H:1) it can be",
+ "_comment7": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]",
"fitting_neuron": [
240,
120,
@@ -38,7 +38,7 @@
10
],
- "_comment": " traing controls",
+ "_comment8": " traing controls",
"systems": [
"../data/"
],
@@ -60,8 +60,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment9": " display and restart",
+ "_comment10": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 10,
@@ -70,5 +70,5 @@
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/source/tests/data_modifier/dipole.json b/source/tests/data_modifier/dipole.json
index 262b29b60a..ac9cec0570 100644
--- a/source/tests/data_modifier/dipole.json
+++ b/source/tests/data_modifier/dipole.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -35,7 +35,7 @@
"resnet_dt": true,
"seed": 1
},
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"learning_rate": {
@@ -43,17 +43,17 @@
"start_lr": 0.01,
"stop_lr": 1e-8,
"decay_steps": 5000,
- "_comment": "that's all"
+ "_comment3": "that's all"
},
"loss": {
"type": "tensor",
"pref": 1.0,
"pref_atomic": 1.0,
- "_comment": " that's all"
+ "_comment4": " that's all"
},
- "_comment": " traing controls",
+ "_comment5": " traing controls",
"training": {
"systems": [
"data_modifier/sys_10"
@@ -64,8 +64,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment6": " display and restart",
+ "_comment7": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 5,
@@ -74,8 +74,8 @@
"load_ckpt": "model.ckpt",
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
- "_comment": "that's all"
+ "_comment9": "that's all"
}
diff --git a/source/tests/finetune/input_finetune.json b/source/tests/finetune/input_finetune.json
index 0b51b5c4d5..daf8be4dda 100644
--- a/source/tests/finetune/input_finetune.json
+++ b/source/tests/finetune/input_finetune.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_embedding": {
"trainable": false
@@ -9,12 +9,12 @@
"H"
],
"descriptor": {
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -22,7 +22,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -33,7 +33,7 @@
"limit_pref_f": 1,
"start_pref_v": 1,
"limit_pref_v": 1,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -42,7 +42,7 @@
"finetune/data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -50,15 +50,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 0,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 1,
"save_freq": 1,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/source/tests/finetune/input_pretrain.json b/source/tests/finetune/input_pretrain.json
index 1b1f50752b..58fd8b120c 100644
--- a/source/tests/finetune/input_pretrain.json
+++ b/source/tests/finetune/input_pretrain.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"H",
@@ -24,7 +24,7 @@
"attn_dotr": true,
"attn_mask": false,
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -34,9 +34,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -44,7 +44,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -55,7 +55,7 @@
"limit_pref_f": 1,
"start_pref_v": 1,
"limit_pref_v": 1,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -64,7 +64,7 @@
"finetune/data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -72,15 +72,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 1,
"save_freq": 1,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/source/tests/init_frz_model/input.json b/source/tests/init_frz_model/input.json
index 7461b8e7c4..bab608680f 100644
--- a/source/tests/init_frz_model/input.json
+++ b/source/tests/init_frz_model/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -21,7 +21,7 @@
"resnet_dt": false,
"axis_neuron": 16,
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -31,9 +31,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -41,7 +41,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -52,7 +52,7 @@
"limit_pref_f": 1,
"start_pref_v": 1,
"limit_pref_v": 1,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -61,7 +61,7 @@
"init_frz_model/data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -69,15 +69,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 1,
"save_freq": 1,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/source/tests/model_compression/input.json b/source/tests/model_compression/input.json
index 2b3c2af542..be64bfb2cf 100644
--- a/source/tests/model_compression/input.json
+++ b/source/tests/model_compression/input.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -21,7 +21,7 @@
"resnet_dt": false,
"axis_neuron": 16,
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -31,9 +31,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
@@ -41,7 +41,7 @@
"decay_steps": 5000,
"start_lr": 0.001,
"stop_lr": 3.51e-8,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
@@ -52,7 +52,7 @@
"limit_pref_f": 1,
"start_pref_v": 0,
"limit_pref_v": 0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
@@ -61,7 +61,7 @@
"model_compression/data"
],
"batch_size": "auto",
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -69,15 +69,15 @@
],
"batch_size": 1,
"numb_btch": 3,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1,
"seed": 10,
"disp_file": "lcurve.out",
"disp_freq": 1,
"save_freq": 1,
- "_comment": "that's all"
+ "_comment9": "that's all"
},
- "_comment": "that's all"
+ "_comment10": "that's all"
}
diff --git a/source/tests/pairwise_dprc.json b/source/tests/pairwise_dprc.json
index 60fb75e775..7c735a8191 100644
--- a/source/tests/pairwise_dprc.json
+++ b/source/tests/pairwise_dprc.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type": "pairwise_dprc",
"type_map": [
@@ -17,8 +17,7 @@
},
"qm_model": {
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 100,
"rcut_smth": 5.80,
"rcut": 6.00,
@@ -45,8 +44,7 @@
},
"qmmm_model": {
"descriptor": {
- "type": "se_atten",
- "stripped_type_embedding": true,
+ "type": "se_atten_v2",
"sel": 100,
"rcut_smth": 5.80,
"rcut": 6.00,
@@ -160,8 +158,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment2": " display and restart",
+ "_comment3": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -173,5 +171,5 @@
"profiling_file": "timeline.json"
},
- "_comment": "that's all"
+ "_comment4": "that's all"
}
diff --git a/source/tests/polar_se_a.json b/source/tests/polar_se_a.json
index 9830ffd684..624113c340 100644
--- a/source/tests/polar_se_a.json
+++ b/source/tests/polar_se_a.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type": "polar",
"type_map": [
@@ -37,7 +37,7 @@
"resnet_dt": true,
"seed": 1
},
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"learning_rate": {
@@ -45,17 +45,17 @@
"start_lr": 0.001,
"decay_steps": 5000,
"decay_rate": 0.95,
- "_comment": "that's all"
+ "_comment3": "that's all"
},
"loss": {
"type": "tensor",
"pref": 1.0,
"pref_atomic": 1.0,
- "_comment": "that's all"
+ "_comment4": "that's all"
},
- "_comment": " traing controls",
+ "_comment5": " traing controls",
"systems": [
"system"
],
@@ -67,8 +67,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment6": " display and restart",
+ "_comment7": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 10,
@@ -78,5 +78,5 @@
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment8": "that's all"
}
diff --git a/source/tests/polar_se_a_tebd.json b/source/tests/polar_se_a_tebd.json
index 56e3ed31aa..6bb548a590 100644
--- a/source/tests/polar_se_a_tebd.json
+++ b/source/tests/polar_se_a_tebd.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type": "polar",
"type_map": [
@@ -46,7 +46,7 @@
"resnet_dt": true,
"seed": 1
},
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"learning_rate": {
@@ -54,17 +54,17 @@
"start_lr": 0.001,
"decay_steps": 5000,
"decay_rate": 0.95,
- "_comment": "that's all"
+ "_comment3": "that's all"
},
"loss": {
"type": "tensor",
"pref": 1.0,
"pref_atomic": 1.0,
- "_comment": "that's all"
+ "_comment4": "that's all"
},
- "_comment": " traing controls",
+ "_comment5": " traing controls",
"systems": [
"system"
],
@@ -76,8 +76,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment6": " display and restart",
+ "_comment7": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 10,
@@ -87,5 +87,5 @@
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment8": "that's all"
}
diff --git a/source/tests/test_data_large_batch.py b/source/tests/test_data_large_batch.py
index 0ea0202c44..3ae46e8cb9 100644
--- a/source/tests/test_data_large_batch.py
+++ b/source/tests/test_data_large_batch.py
@@ -197,37 +197,37 @@ def test_data_mixed_type(self):
np.savetxt("f.out", f.reshape([1, -1]), delimiter=",")
np.savetxt("v.out", v.reshape([1, -1]), delimiter=",")
- refe = [6.121172052273667e01]
+ refe = [6.121172052273665543e01]
reff = [
- 1.1546857028815118e-02,
- 1.7560407103242779e-02,
- 7.1301778864729290e-04,
- 2.3682630974376197e-02,
- 1.6842732518204180e-02,
- -2.2408109608703206e-03,
- -7.9408568690697776e-03,
- 9.6856119564082792e-03,
- 1.9055514693144326e-05,
- 8.7017502459205160e-03,
- -2.7153030569749256e-02,
- -8.8338555421916490e-04,
- -4.3841165945453904e-02,
- 5.8104108317526765e-03,
- 2.6243178542006552e-03,
- 7.8507845654118558e-03,
- -2.2746131839858654e-02,
- -2.3219464245160639e-04,
+ 1.154685702881510720e-02,
+ 1.756040710324277901e-02,
+ 7.130177886472930130e-04,
+ 2.368263097437618356e-02,
+ 1.684273251820418010e-02,
+ -2.240810960870319706e-03,
+ -7.940856869069763679e-03,
+ 9.685611956408284387e-03,
+ 1.905551469314455948e-05,
+ 8.701750245920510801e-03,
+ -2.715303056974926327e-02,
+ -8.833855542191653386e-04,
+ -4.384116594545389017e-02,
+ 5.810410831752661764e-03,
+ 2.624317854200653062e-03,
+ 7.850784565411857499e-03,
+ -2.274613183985864026e-02,
+ -2.321946424516053086e-04,
]
refv = [
- -0.10488160947198523,
- 0.016694308932682225,
- 0.003444164500535988,
- 0.016694308932682235,
- -0.05415326614376374,
- -0.0010792017166882334,
- 0.003444164500535988,
- -0.001079201716688233,
- -0.00020932681975049773,
+ -1.048816094719852016e-01,
+ 1.669430893268222804e-02,
+ 3.444164500535986783e-03,
+ 1.669430893268222110e-02,
+ -5.415326614376372166e-02,
+ -1.079201716688232750e-03,
+ 3.444164500535985916e-03,
+ -1.079201716688232750e-03,
+ -2.093268197504977288e-04,
]
refe = np.reshape(refe, [-1])
@@ -393,37 +393,37 @@ def test_stripped_data_mixed_type(self):
np.savetxt("f11.out", f.reshape([1, -1]), delimiter=",")
np.savetxt("v11.out", v.reshape([1, -1]), delimiter=",")
- refe = [6.125926357944699419e01]
+ refe = [6.124119974943835132e01]
reff = [
- 4.071033392194846855e-03,
- 1.191078506555811808e-02,
- 5.710038490045591959e-04,
- 2.083813511902148086e-02,
- 1.050404909007916256e-02,
- -1.935131519230624082e-03,
- -1.844253334357196135e-03,
- 7.073208513688628192e-03,
- -5.000418009101099666e-05,
- 2.877594036828151017e-03,
- -1.849276075329028823e-02,
- -5.424378676202407318e-04,
- -3.237425532982485255e-02,
- 2.747768700765259881e-03,
- 2.122946741188093227e-03,
- 6.431746116137571252e-03,
- -1.374305061680087571e-02,
- -1.663770232507767613e-04,
+ 8.617444257623986525e-03,
+ 1.622774527785437321e-02,
+ 7.219537519817814273e-04,
+ 2.465257480331137924e-02,
+ 1.507377800325802181e-02,
+ -2.267846199393293988e-03,
+ -6.217685260668888089e-03,
+ 9.187965356558825195e-03,
+ -2.082402632037372596e-05,
+ 6.179226045047841662e-03,
+ -2.505229190184387472e-02,
+ -7.834051085801594424e-04,
+ -4.104669576212031240e-02,
+ 4.721690416727373704e-03,
+ 2.565744238275521286e-03,
+ 7.815135916805987862e-03,
+ -2.015888715255471572e-02,
+ -2.156226559634751916e-04,
]
refv = [
- -5.699812217912397783e-02,
- 8.904976757403395421e-03,
- 2.306167440955633578e-03,
- 8.904976757403414503e-03,
- -3.502855693434053092e-02,
- -6.596869271547717252e-04,
- 2.306167440955633145e-03,
- -6.596869271547715083e-04,
- -1.602012510288682446e-04,
+ -8.500718686149140446e-02,
+ 1.389198522732191729e-02,
+ 3.059204598073241802e-03,
+ 1.389198522732190168e-02,
+ -4.908897840490741155e-02,
+ -9.530658829897690944e-04,
+ 3.059204598073239634e-03,
+ -9.530658829897688776e-04,
+ -1.999114402095244765e-04,
]
refe = np.reshape(refe, [-1])
@@ -589,37 +589,37 @@ def test_compressible_data_mixed_type(self):
np.savetxt("f.out", f.reshape([1, -1]), delimiter=",")
np.savetxt("v.out", v.reshape([1, -1]), delimiter=",")
- refe = [4.978373241868134613e01]
+ refe = [4.951981086834933166e01]
reff = [
- 3.587688614359243466e00,
- 3.202584939641652362e00,
- 1.166711402014127957e-01,
- 2.384342214774975321e00,
- 3.542611694579458348e00,
- -1.916097942322055603e-01,
- -4.120123413353201869e00,
- 1.474564563185293276e00,
- 2.693540383300669847e-02,
- 2.380464377433281431e00,
- -4.807108079981841975e00,
- -1.784915273650321821e-01,
- -5.314498717923408222e00,
- 1.495140750360528958e00,
- 2.602480033292806638e-01,
- 1.082126924709109872e00,
- -4.907793867785092523e00,
- -3.375322576646228034e-02,
+ 3.706988425960650702e00,
+ 3.375774160760826259e00,
+ 1.239489759702384758e-01,
+ 2.575853678437920902e00,
+ 3.699539279116211166e00,
+ -2.069005324163125936e-01,
+ -4.258805446260704564e00,
+ 1.554731495837070154e00,
+ 2.737673623267052048e-02,
+ 2.450754822743671735e00,
+ -5.057615189705980008e00,
+ -1.869152757392671393e-01,
+ -5.623845848960147720e00,
+ 1.555965710447468231e00,
+ 2.781927025028870237e-01,
+ 1.149054368078609167e00,
+ -5.128395456455598023e00,
+ -3.570260655021625928e-02,
]
refv = [
- -1.760844499856655432e01,
- 3.767507287595555532e00,
- 4.505304110104397242e-01,
- 3.767507287595555088e00,
- -1.052518611764145362e01,
- -2.174256785611231313e-01,
- 4.505304110104397797e-01,
- -2.174256785611231868e-01,
- -2.462288791771098315e-02,
+ -1.829433693444094899e01,
+ 3.911090802878004702e00,
+ 4.731456035336862320e-01,
+ 3.911090802878002037e00,
+ -1.103569683318792194e01,
+ -2.277430677764267219e-01,
+ 4.731456035336863986e-01,
+ -2.277430677764267497e-01,
+ -2.613092934079438642e-02,
]
refe = np.reshape(refe, [-1])
diff --git a/source/tests/test_descrpt_se_atten.py b/source/tests/test_descrpt_se_atten.py
index 6f327fa47c..e49e6ab3e2 100644
--- a/source/tests/test_descrpt_se_atten.py
+++ b/source/tests/test_descrpt_se_atten.py
@@ -495,66 +495,66 @@ def test_stripped_type_embedding_descriptor_two_sides(self):
np.savetxt("two1.out", model_dout.reshape([1, -1]), delimiter=",")
ref_dout = [
- 6.383405295803018563e-06,
- -4.970443805148654732e-05,
- -4.970443805148654732e-05,
- 3.915685888463831281e-04,
- -4.475044754350870549e-05,
- 3.524944903606048942e-04,
- -3.724785876580149628e-05,
- 2.922698210610697768e-04,
- 1.253193390649937305e-04,
- -9.866284622165712517e-04,
- 2.746706080574298427e-06,
- -2.936129326691491197e-05,
- -2.936129326691491197e-05,
- 3.361216906441433726e-04,
- -2.672646322529656884e-05,
- 3.054685398038972568e-04,
- -1.987664884734486549e-05,
- 2.233066336404597018e-04,
- 7.321769925898204405e-05,
- -8.355355708215854091e-04,
- 2.577054933601801410e-06,
- -2.596197919081208680e-05,
- -2.596197919081208680e-05,
- 2.634209681343336554e-04,
- -2.437422597229959228e-05,
- 2.471257526952611570e-04,
- -1.804241933402407212e-05,
- 1.824137020231941916e-04,
- 6.580156956353395276e-05,
- -6.669984436619770833e-04,
- 7.430461358270195101e-06,
- -4.911199551777768784e-05,
- -4.911199551777768784e-05,
- 3.246586591299433010e-04,
- -4.545306022962886084e-05,
- 3.004976436904362321e-04,
- -3.943764170159576608e-05,
- 2.607633588556588491e-04,
- 1.265722360907320951e-04,
- -8.368076661582606740e-04,
- 1.006529590251595632e-05,
- -5.947201651377735437e-05,
- -5.947201651377735437e-05,
- 3.519072130827992253e-04,
- -5.475453207501003249e-05,
- 3.240000254870917815e-04,
- -5.001544285922251417e-05,
- 2.958951751684798311e-04,
- 1.541766955454939312e-04,
- -9.123303972245934293e-04,
- 5.302564262473944598e-06,
- -4.087818220772074430e-05,
- -4.087818220772074430e-05,
- 3.170373643181760982e-04,
- -3.805426060094593870e-05,
- 2.950399422301473447e-04,
- -3.093771463791340635e-05,
- 2.394374838886431352e-04,
- 1.045675931841061270e-04,
- -8.106366082292457186e-04,
+ 2.910296358673981606e-06,
+ -3.297689549631518680e-05,
+ -3.297689549631518680e-05,
+ 3.790996417030466402e-04,
+ -3.082208958603667925e-05,
+ 3.544004728264616810e-04,
+ -2.397997896082787038e-05,
+ 2.744923480535521121e-04,
+ 8.486866768450577558e-05,
+ -9.750155670867453753e-04,
+ 8.680391572974659491e-07,
+ -1.596948473518331016e-05,
+ -1.596948473518331016e-05,
+ 3.249686279109944903e-04,
+ -1.508338456375446526e-05,
+ 3.070479490395221158e-04,
+ -1.047241469038003787e-05,
+ 2.085462014454144320e-04,
+ 4.065724483202033993e-05,
+ -8.245932936607477210e-04,
+ 5.959146184656097397e-07,
+ -1.265847984116858078e-05,
+ -1.265847984116858078e-05,
+ 2.713109337202710531e-04,
+ -1.163070862097512446e-05,
+ 2.491582022684395484e-04,
+ -8.056716526966370043e-06,
+ 1.720174894426871476e-04,
+ 3.174999037064446555e-05,
+ -6.798281455902291598e-04,
+ 3.145148216891492605e-06,
+ -3.245585831548520087e-05,
+ -3.245585831548520087e-05,
+ 3.350745140453206166e-04,
+ -2.936281422860278914e-05,
+ 3.031890775924862423e-04,
+ -2.408578375619038739e-05,
+ 2.487530226589902390e-04,
+ 8.275930808338685728e-05,
+ -8.545607559813118157e-04,
+ 4.745334138737575192e-06,
+ -4.149649152356857482e-05,
+ -4.149649152356857482e-05,
+ 3.633282453063247882e-04,
+ -3.734652895210441184e-05,
+ 3.270295126452897193e-04,
+ -3.235347865588130865e-05,
+ 2.832387658145111447e-04,
+ 1.064511649928167193e-04,
+ -9.321000322425568741e-04,
+ 1.879347284602219830e-06,
+ -2.470327295060103235e-05,
+ -2.470327295060103235e-05,
+ 3.269344178119031551e-04,
+ -2.248434624179290029e-05,
+ 2.975826199248595046e-04,
+ -1.721291645154368551e-05,
+ 2.273800448313684436e-04,
+ 6.252118835933537862e-05,
+ -8.271938096175299659e-04,
]
places = 10
@@ -673,66 +673,66 @@ def test_compressible_descriptor_two_sides(self):
np.savetxt("two.out", model_dout.reshape([1, -1]), delimiter=",")
ref_dout = [
- 1.141503478024449673e-02,
- -3.178281748950724847e-05,
- -3.178281748950724847e-05,
- 9.254130621715512665e-08,
- 9.355933444157461115e-03,
- -2.610048686000205533e-05,
- 4.941903103720379602e-03,
- -1.380808118210685460e-05,
- 3.126563600452313824e-02,
- -8.708904919620347271e-05,
- 9.450853881799477565e-03,
- -2.980684015958201359e-05,
- -2.980684015958201359e-05,
- 1.108555048277460046e-07,
- 7.781618028607465050e-03,
- -2.477633432327578414e-05,
- 4.121729175224964070e-03,
- -1.322807700672256342e-05,
- 2.594071094608556283e-02,
- -8.191466287679302258e-05,
- 8.687152275749851840e-03,
- 7.169964582221861242e-04,
- 7.169964582221861242e-04,
- 5.920093158205708974e-05,
- 6.980509354575046600e-03,
- 5.760687878669166736e-04,
- 3.535108314119884531e-03,
- 2.917691604627718586e-04,
- 2.237560827369438829e-02,
- 1.845969886863443182e-03,
- 1.117485257493494588e-02,
- 9.236674604597058238e-04,
- 9.236674604597058238e-04,
- 7.636846801269864835e-05,
- 8.961677239268169004e-03,
- 7.406856311644359698e-04,
- 4.531907674672685724e-03,
- 3.746091075776196411e-04,
- 2.878475290103474707e-02,
- 2.378376253052099428e-03,
- 1.227893341898089741e-02,
- 1.013435353901762093e-03,
- 1.013435353901762093e-03,
- 8.366615519789353964e-05,
- 9.853113810276110246e-03,
- 8.131104560097670826e-04,
- 4.984313719158160738e-03,
- 4.113088418105881284e-04,
- 3.165040520299260834e-02,
- 2.611774507262543468e-03,
- 1.073868692816030805e-02,
- 8.884306507846149482e-04,
- 8.884306507846149482e-04,
- 7.351499010825891778e-05,
- 8.615254842259736923e-03,
- 7.126675675525927581e-04,
- 4.359946655901846685e-03,
- 3.606316239946267860e-04,
- 2.762742703322493970e-02,
- 2.285540100738304818e-03,
+ 1.036073419051481218e-02,
+ 7.240082713918804831e-04,
+ 7.240082713918804831e-04,
+ 5.059763982689874189e-05,
+ 8.861222417326152997e-03,
+ 6.192258716986104783e-04,
+ 5.684670353835866163e-03,
+ 3.972355266104098072e-04,
+ 2.972080556074847488e-02,
+ 2.076940570592187858e-03,
+ 8.618769799976173929e-03,
+ 6.012533410070171639e-04,
+ 6.012533410070171639e-04,
+ 4.196935946091339792e-05,
+ 7.372555367686711193e-03,
+ 5.143434970398183797e-04,
+ 4.737331445281250247e-03,
+ 3.304321425798863437e-04,
+ 2.472045260422556581e-02,
+ 1.724891497741334358e-03,
+ 7.501652728125289375e-03,
+ 6.589020340101068521e-04,
+ 6.589020340101068521e-04,
+ 5.792892984552734919e-05,
+ 6.670726906383729442e-03,
+ 5.860573142386985013e-04,
+ 4.019558129868144349e-03,
+ 3.531475436354094741e-04,
+ 2.075417763310022021e-02,
+ 1.824442459657951146e-03,
+ 9.633741334492003025e-03,
+ 8.463229941979812576e-04,
+ 8.463229941979812576e-04,
+ 7.437495215274456432e-05,
+ 8.566452651264443857e-03,
+ 7.526427265583468876e-04,
+ 5.159465444394889175e-03,
+ 4.533298301373441018e-04,
+ 2.667538316932921080e-02,
+ 2.344288082726328319e-03,
+ 1.059332370946120330e-02,
+ 9.300091136049074697e-04,
+ 9.300091136049074697e-04,
+ 8.164809027640537134e-05,
+ 9.420348275312082423e-03,
+ 8.270372110426749569e-04,
+ 5.675669673060779359e-03,
+ 4.982872107808511419e-04,
+ 2.934228206409428968e-02,
+ 2.576073356437785442e-03,
+ 9.259830885475134332e-03,
+ 8.130992022541684528e-04,
+ 8.130992022541684528e-04,
+ 7.141532944786595336e-05,
+ 8.231990685424640450e-03,
+ 7.228771128684428069e-04,
+ 4.957665460862610216e-03,
+ 4.353342880152572089e-04,
+ 2.560566234978201017e-02,
+ 2.248802567567107294e-03,
]
places = 10
diff --git a/source/tests/test_model_compression_se_atten.py b/source/tests/test_model_compression_se_atten.py
index f047447975..30d78669ce 100644
--- a/source/tests/test_model_compression_se_atten.py
+++ b/source/tests/test_model_compression_se_atten.py
@@ -40,6 +40,19 @@ def _subprocess_run(command):
return popen.returncode
+# 4 tests:
+# - type embedding FP64, se_atten FP64
+# - type embedding FP64, se_atten FP32
+# - type embedding FP32, se_atten FP64
+# - type embedding FP32, se_atten FP32
+tests = [
+ {"se_atten precision": "float64", "type embedding precision": "float64"},
+ {"se_atten precision": "float64", "type embedding precision": "float32"},
+ {"se_atten precision": "float32", "type embedding precision": "float64"},
+ {"se_atten precision": "float32", "type embedding precision": "float32"},
+]
+
+
@unittest.skipIf(
parse_version(tf.__version__) < parse_version("2"),
f"The current tf version {tf.__version__} is too low to run the new testing model.",
@@ -47,18 +60,7 @@ def _subprocess_run(command):
def _init_models():
data_file = str(tests_path / os.path.join("model_compression", "data"))
inputs, frozen_models, compressed_models = [], [], []
- # 4 tests:
- # - type embedding FP64, se_atten FP64
- # - type embedding FP64, se_atten FP32
- # - type embedding FP32, se_atten FP64
- # - type embedding FP32, se_atten FP32
- tests = [
- {"se_atten precision": "float64", "type embedding precision": "float64"},
- {"se_atten precision": "float64", "type embedding precision": "float32"},
- {"se_atten precision": "float32", "type embedding precision": "float64"},
- {"se_atten precision": "float32", "type embedding precision": "float32"},
- ]
- for i in range(4):
+ for i in range(len(tests)):
INPUT = str(tests_path / f"input{i}.json")
frozen_model = str(tests_path / f"dp-original-se-atten{i}.pb")
compressed_model = str(tests_path / f"dp-compressed-se-atten{i}.pb")
@@ -101,18 +103,7 @@ def _init_models():
def _init_models_exclude_types():
data_file = str(tests_path / os.path.join("model_compression", "data"))
inputs, frozen_models, compressed_models = [], [], []
- # 4 tests:
- # - type embedding FP64, se_atten FP64
- # - type embedding FP64, se_atten FP32
- # - type embedding FP32, se_atten FP64
- # - type embedding FP32, se_atten FP32
- tests = [
- {"se_atten precision": "float64", "type embedding precision": "float64"},
- {"se_atten precision": "float64", "type embedding precision": "float32"},
- {"se_atten precision": "float32", "type embedding precision": "float64"},
- {"se_atten precision": "float32", "type embedding precision": "float32"},
- ]
- for i in range(4):
+ for i in range(len(tests)):
INPUT = str(tests_path / f"input{i}.json")
frozen_model = str(tests_path / f"dp-original-se-atten{i}-exclude-types.pb")
compressed_model = str(
@@ -166,8 +157,8 @@ def _get_default_places(nth_test):
class TestDeepPotAPBC(unittest.TestCase):
@classmethod
def setUpClass(self):
- self.dp_originals = [DeepPot(FROZEN_MODELS[i]) for i in range(4)]
- self.dp_compresseds = [DeepPot(COMPRESSED_MODELS[i]) for i in range(4)]
+ self.dp_originals = [DeepPot(FROZEN_MODELS[i]) for i in range(len(tests))]
+ self.dp_compresseds = [DeepPot(COMPRESSED_MODELS[i]) for i in range(len(tests))]
self.coords = np.array(
[
12.83,
@@ -194,7 +185,7 @@ def setUpClass(self):
self.box = np.array([13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0])
def test_attrs(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -212,7 +203,7 @@ def test_attrs(self):
self.assertEqual(dp_compressed.get_dim_aparam(), 0)
def test_1frame(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -238,7 +229,7 @@ def test_1frame(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_1frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -270,7 +261,7 @@ def test_1frame_atm(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_2frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -312,8 +303,8 @@ def test_2frame_atm(self):
class TestDeepPotANoPBC(unittest.TestCase):
@classmethod
def setUpClass(self):
- self.dp_originals = [DeepPot(FROZEN_MODELS[i]) for i in range(4)]
- self.dp_compresseds = [DeepPot(COMPRESSED_MODELS[i]) for i in range(4)]
+ self.dp_originals = [DeepPot(FROZEN_MODELS[i]) for i in range(len(tests))]
+ self.dp_compresseds = [DeepPot(COMPRESSED_MODELS[i]) for i in range(len(tests))]
self.coords = np.array(
[
12.83,
@@ -340,7 +331,7 @@ def setUpClass(self):
self.box = None
def test_1frame(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -366,7 +357,7 @@ def test_1frame(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_1frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -398,7 +389,7 @@ def test_1frame_atm(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_2frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -439,8 +430,8 @@ def test_2frame_atm(self):
class TestDeepPotALargeBoxNoPBC(unittest.TestCase):
@classmethod
def setUpClass(self):
- self.dp_originals = [DeepPot(FROZEN_MODELS[i]) for i in range(4)]
- self.dp_compresseds = [DeepPot(COMPRESSED_MODELS[i]) for i in range(4)]
+ self.dp_originals = [DeepPot(FROZEN_MODELS[i]) for i in range(len(tests))]
+ self.dp_compresseds = [DeepPot(COMPRESSED_MODELS[i]) for i in range(len(tests))]
self.coords = np.array(
[
12.83,
@@ -467,7 +458,7 @@ def setUpClass(self):
self.box = np.array([19.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0])
def test_1frame(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -493,7 +484,7 @@ def test_1frame(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_1frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -525,7 +516,7 @@ def test_1frame_atm(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_ase(self):
- for i in range(4):
+ for i in range(len(tests)):
default_places = _get_default_places(i)
from ase import (
Atoms,
@@ -563,8 +554,10 @@ def test_ase(self):
class TestDeepPotAPBCExcludeTypes(unittest.TestCase):
@classmethod
def setUpClass(self):
- self.dp_originals = [DeepPot(FROZEN_MODELS_ET[i]) for i in range(4)]
- self.dp_compresseds = [DeepPot(COMPRESSED_MODELS_ET[i]) for i in range(4)]
+ self.dp_originals = [DeepPot(FROZEN_MODELS_ET[i]) for i in range(len(tests))]
+ self.dp_compresseds = [
+ DeepPot(COMPRESSED_MODELS_ET[i]) for i in range(len(tests))
+ ]
self.coords = np.array(
[
12.83,
@@ -592,7 +585,7 @@ def setUpClass(self):
@classmethod
def tearDownClass(self):
- for i in range(4):
+ for i in range(len(tests)):
_file_delete(INPUTS_ET[i])
_file_delete(FROZEN_MODELS_ET[i])
_file_delete(COMPRESSED_MODELS_ET[i])
@@ -614,7 +607,7 @@ def tearDownClass(self):
_file_delete("lcurve.out")
def test_attrs(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -632,7 +625,7 @@ def test_attrs(self):
self.assertEqual(dp_compressed.get_dim_aparam(), 0)
def test_1frame(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -658,7 +651,7 @@ def test_1frame(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_1frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
@@ -690,7 +683,7 @@ def test_1frame_atm(self):
np.testing.assert_almost_equal(vv0, vv1, default_places)
def test_2frame_atm(self):
- for i in range(4):
+ for i in range(len(tests)):
dp_original = self.dp_originals[i]
dp_compressed = self.dp_compresseds[i]
default_places = _get_default_places(i)
diff --git a/source/tests/test_model_devi.py b/source/tests/test_model_devi.py
index b1c5ec8ead..91c95af46c 100644
--- a/source/tests/test_model_devi.py
+++ b/source/tests/test_model_devi.py
@@ -86,6 +86,33 @@ def test_make_model_devi(self):
x = np.loadtxt(self.output)
np.testing.assert_allclose(x, self.expect, 6)
+ def test_make_model_devi_real_error(self):
+ make_model_devi(
+ models=self.graph_dirs,
+ system=self.data_dir,
+ set_prefix="set",
+ output=self.output,
+ frequency=self.freq,
+ real_error=True,
+ )
+ x = np.loadtxt(self.output)
+ np.testing.assert_allclose(
+ x,
+ np.array(
+ [
+ 0.000000e00,
+ 6.709021e-01,
+ 1.634359e-03,
+ 3.219720e-01,
+ 2.018684e00,
+ 1.829748e00,
+ 1.956474e00,
+ 1.550898e02,
+ ]
+ ),
+ 6,
+ )
+
def tearDown(self):
for pb in self.graph_dirs:
os.remove(pb)
diff --git a/source/tests/test_model_se_atten.py b/source/tests/test_model_se_atten.py
index af247c305f..6e6e9928a6 100644
--- a/source/tests/test_model_se_atten.py
+++ b/source/tests/test_model_se_atten.py
@@ -374,37 +374,37 @@ def test_compressible_model(self):
np.savetxt("f.out", f.reshape([1, -1]), delimiter=",")
np.savetxt("v.out", v.reshape([1, -1]), delimiter=",")
- refe = [4.978373241868134613e01]
+ refe = [4.951981086834933166e01]
reff = [
- 3.587688614359243466e00,
- 3.202584939641652362e00,
- 1.166711402014127957e-01,
- 2.384342214774975321e00,
- 3.542611694579458348e00,
- -1.916097942322055603e-01,
- -4.120123413353201869e00,
- 1.474564563185293276e00,
- 2.693540383300669847e-02,
- 2.380464377433281431e00,
- -4.807108079981841975e00,
- -1.784915273650321821e-01,
- -5.314498717923408222e00,
- 1.495140750360528958e00,
- 2.602480033292806638e-01,
- 1.082126924709109872e00,
- -4.907793867785092523e00,
- -3.375322576646228034e-02,
+ 3.706988425960650702e00,
+ 3.375774160760826259e00,
+ 1.239489759702384758e-01,
+ 2.575853678437920902e00,
+ 3.699539279116211166e00,
+ -2.069005324163125936e-01,
+ -4.258805446260704564e00,
+ 1.554731495837070154e00,
+ 2.737673623267052048e-02,
+ 2.450754822743671735e00,
+ -5.057615189705980008e00,
+ -1.869152757392671393e-01,
+ -5.623845848960147720e00,
+ 1.555965710447468231e00,
+ 2.781927025028870237e-01,
+ 1.149054368078609167e00,
+ -5.128395456455598023e00,
+ -3.570260655021625928e-02,
]
refv = [
- -1.760844499856656498e01,
- 3.767507287595556420e00,
- 4.505304110104396687e-01,
- 3.767507287595555532e00,
- -1.052518611764145540e01,
- -2.174256785611232701e-01,
- 4.505304110104397797e-01,
- -2.174256785611233256e-01,
- -2.462288791771097621e-02,
+ -1.829433693444095255e01,
+ 3.911090802878004258e00,
+ 4.731456035336862320e-01,
+ 3.911090802878003370e00,
+ -1.103569683318792372e01,
+ -2.277430677764266387e-01,
+ 4.731456035336862875e-01,
+ -2.277430677764267775e-01,
+ -2.613092934079439336e-02,
]
refe = np.reshape(refe, [-1])
@@ -604,37 +604,37 @@ def test_stripped_type_embedding_model(self):
np.savetxt("f.out", f.reshape([1, -1]), delimiter=",")
np.savetxt("v.out", v.reshape([1, -1]), delimiter=",")
- refe = [6.125926357944699419e01]
+ refe = [6.124119974943835132e01]
reff = [
- 4.071033392194846855e-03,
- 1.191078506555811808e-02,
- 5.710038490045591959e-04,
- 2.083813511902148086e-02,
- 1.050404909007916256e-02,
- -1.935131519230624082e-03,
- -1.844253334357196135e-03,
- 7.073208513688628192e-03,
- -5.000418009101099666e-05,
- 2.877594036828151017e-03,
- -1.849276075329028823e-02,
- -5.424378676202407318e-04,
- -3.237425532982485255e-02,
- 2.747768700765259881e-03,
- 2.122946741188093227e-03,
- 6.431746116137571252e-03,
- -1.374305061680087571e-02,
- -1.663770232507767613e-04,
+ 8.617444257623986525e-03,
+ 1.622774527785437321e-02,
+ 7.219537519817814273e-04,
+ 2.465257480331137924e-02,
+ 1.507377800325802181e-02,
+ -2.267846199393293988e-03,
+ -6.217685260668888089e-03,
+ 9.187965356558825195e-03,
+ -2.082402632037372596e-05,
+ 6.179226045047841662e-03,
+ -2.505229190184387472e-02,
+ -7.834051085801594424e-04,
+ -4.104669576212031240e-02,
+ 4.721690416727373704e-03,
+ 2.565744238275521286e-03,
+ 7.815135916805987862e-03,
+ -2.015888715255471572e-02,
+ -2.156226559634751916e-04,
]
refv = [
- -5.699812217912397089e-02,
- 8.904976757403390217e-03,
- 2.306167440955633578e-03,
- 8.904976757403417972e-03,
- -3.502855693434053092e-02,
- -6.596869271547715083e-04,
- 2.306167440955633578e-03,
- -6.596869271547720504e-04,
- -1.602012510288682717e-04,
+ -8.500718686149139058e-02,
+ 1.389198522732191729e-02,
+ 3.059204598073241802e-03,
+ 1.389198522732190515e-02,
+ -4.908897840490741848e-02,
+ -9.530658829897693113e-04,
+ 3.059204598073239634e-03,
+ -9.530658829897692029e-04,
+ -1.999114402095244223e-04,
]
refe = np.reshape(refe, [-1])
diff --git a/source/tests/train_dos.json b/source/tests/train_dos.json
index cab1e6e452..d08fbd90e1 100644
--- a/source/tests/train_dos.json
+++ b/source/tests/train_dos.json
@@ -68,5 +68,5 @@
"profiling_file": "timeline.json"
},
- "_comment": "that's all"
+ "_comment1": "that's all"
}
diff --git a/source/tests/water.json b/source/tests/water.json
index 32522776a4..6b61ce98ae 100644
--- a/source/tests/water.json
+++ b/source/tests/water.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "loc_frame",
@@ -26,12 +26,12 @@
1,
0
],
- "_comment": " default rule: []",
- "_comment": " user defined rule: for each type provides two axes, ",
- "_comment": " for each axis: (a_or_r, type, idx)",
- "_comment": " if type < 0, exclude type -(type+1)",
- "_comment": " for water (O:0, H:1) it can be",
- "_comment": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]"
+ "_comment2": " default rule: []",
+ "_comment3": " user defined rule: for each type provides two axes, ",
+ "_comment4": " for each axis: (a_or_r, type, idx)",
+ "_comment5": " if type < 0, exclude type -(type+1)",
+ "_comment6": " for water (O:0, H:1) it can be",
+ "_comment7": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]"
},
"fitting_net": {
"neuron": [
@@ -45,7 +45,7 @@
}
},
- "_comment": " traing controls",
+ "_comment8": " traing controls",
"systems": [
"system"
],
@@ -65,8 +65,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment9": " display and restart",
+ "_comment10": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -76,5 +76,5 @@
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment11": "that's all"
}
diff --git a/source/tests/water_hybrid.json b/source/tests/water_hybrid.json
index cea2321f57..4e813beec1 100644
--- a/source/tests/water_hybrid.json
+++ b/source/tests/water_hybrid.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -66,7 +66,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -86,8 +86,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -99,5 +99,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_layer_name.json b/source/tests/water_layer_name.json
index 67ea10d1bd..caa953c903 100644
--- a/source/tests/water_layer_name.json
+++ b/source/tests/water_layer_name.json
@@ -1,5 +1,5 @@
{
- "_comment": "layer_name",
+ "_comment1": "layer_name",
"model": {
"descriptor": {
"type": "se_a",
@@ -58,7 +58,7 @@
"start_lr": 0.001,
"decay_steps": 5000,
"decay_rate": 0.95,
- "_comment": "that's all"
+ "_comment2": "that's all"
},
"loss_dict": {
@@ -82,7 +82,7 @@
}
},
- "_comment": " traing controls",
+ "_comment3": " traing controls",
"systems": [
"system"
],
@@ -102,5 +102,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment4": "that's all"
}
diff --git a/source/tests/water_multi.json b/source/tests/water_multi.json
index e74b724157..2ccbaa9384 100644
--- a/source/tests/water_multi.json
+++ b/source/tests/water_multi.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_a",
@@ -52,14 +52,14 @@
"start_lr": 0.001,
"decay_steps": 5000,
"decay_rate": 0.95,
- "_comment": "that's all"
+ "_comment2": "that's all"
},
"water_dipole": {
"type": "exp",
"start_lr": 0.001,
"decay_steps": 5000,
"decay_rate": 0.95,
- "_comment": "that's all"
+ "_comment3": "that's all"
}
},
@@ -80,7 +80,7 @@
}
},
- "_comment": " traing controls",
+ "_comment4": " traing controls",
"systems": [
"system"
],
@@ -100,5 +100,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a.json b/source/tests/water_se_a.json
index 4cc9e3c21d..e864d85f6b 100644
--- a/source/tests/water_se_a.json
+++ b/source/tests/water_se_a.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_a",
@@ -29,7 +29,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -49,8 +49,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -62,5 +62,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a_afparam.json b/source/tests/water_se_a_afparam.json
index fa4ab39231..6504643b0c 100644
--- a/source/tests/water_se_a_afparam.json
+++ b/source/tests/water_se_a_afparam.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_a",
@@ -31,7 +31,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -51,8 +51,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -64,5 +64,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a_aparam.json b/source/tests/water_se_a_aparam.json
index d1a453e129..4a0c478de5 100644
--- a/source/tests/water_se_a_aparam.json
+++ b/source/tests/water_se_a_aparam.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_a",
@@ -30,7 +30,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -50,8 +50,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -63,5 +63,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a_ebd.json b/source/tests/water_se_a_ebd.json
index 528786c4ef..3ce0653d28 100644
--- a/source/tests/water_se_a_ebd.json
+++ b/source/tests/water_se_a_ebd.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_a_ebd",
@@ -28,7 +28,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -48,8 +48,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -61,5 +61,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a_fparam.json b/source/tests/water_se_a_fparam.json
index 48cefcfc86..fff46098b8 100644
--- a/source/tests/water_se_a_fparam.json
+++ b/source/tests/water_se_a_fparam.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_a",
@@ -30,7 +30,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -50,8 +50,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -63,5 +63,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a_srtab.json b/source/tests/water_se_a_srtab.json
index 962659d9f0..dda7f52dbf 100644
--- a/source/tests/water_se_a_srtab.json
+++ b/source/tests/water_se_a_srtab.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"use_srtab": "tab.xvg",
"smin_alpha": 0.3,
@@ -33,7 +33,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -53,8 +53,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -66,5 +66,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_a_type.json b/source/tests/water_se_a_type.json
index e764b22d0f..9f5e4e7a51 100644
--- a/source/tests/water_se_a_type.json
+++ b/source/tests/water_se_a_type.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_embedding": {
"neuron": [
@@ -39,7 +39,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -59,8 +59,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -72,5 +72,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_atten.json b/source/tests/water_se_atten.json
index 2b46f06b6a..c8799bf254 100644
--- a/source/tests/water_se_atten.json
+++ b/source/tests/water_se_atten.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"O",
@@ -42,7 +42,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -62,8 +62,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -75,5 +75,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_atten_compressible_mixed_type.json b/source/tests/water_se_atten_compressible_mixed_type.json
index 04c84e3160..fbf723d10a 100644
--- a/source/tests/water_se_atten_compressible_mixed_type.json
+++ b/source/tests/water_se_atten_compressible_mixed_type.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"foo",
@@ -43,7 +43,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system_mixed_type"
],
@@ -63,8 +63,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -76,5 +76,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_atten_mixed_type.json b/source/tests/water_se_atten_mixed_type.json
index 67ab8e782d..8edabb394a 100644
--- a/source/tests/water_se_atten_mixed_type.json
+++ b/source/tests/water_se_atten_mixed_type.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type_map": [
"foo",
@@ -42,7 +42,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system_mixed_type"
],
@@ -62,8 +62,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -75,5 +75,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_r.json b/source/tests/water_se_r.json
index 37c0239b36..eb12f359cc 100644
--- a/source/tests/water_se_r.json
+++ b/source/tests/water_se_r.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_r",
@@ -28,7 +28,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -48,8 +48,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -61,5 +61,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/water_se_t.json b/source/tests/water_se_t.json
index e7d1b3e486..7ce666e4aa 100644
--- a/source/tests/water_se_t.json
+++ b/source/tests/water_se_t.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"descriptor": {
"type": "se_3be",
@@ -28,7 +28,7 @@
}
},
- "_comment": " traing controls",
+ "_comment2": " traing controls",
"systems": [
"system"
],
@@ -48,8 +48,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment3": " display and restart",
+ "_comment4": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 1,
@@ -61,5 +61,5 @@
"profiling": false,
"profiling_file": "timeline.json",
- "_comment": "that's all"
+ "_comment5": "that's all"
}
diff --git a/source/tests/wfc.json b/source/tests/wfc.json
index b1cb832a33..b2c98ab136 100644
--- a/source/tests/wfc.json
+++ b/source/tests/wfc.json
@@ -1,5 +1,5 @@
{
- "_comment": " model parameters",
+ "_comment1": " model parameters",
"model": {
"type": "polar",
"type_map": [
@@ -31,13 +31,13 @@
1,
0
],
- "_comment": " default rule: []",
- "_comment": " user defined rule: for each type provides two axes, ",
- "_comment": " for each axis: (a_or_r, type, idx)",
- "_comment": " if type < 0, exclude type -(type+1)",
- "_comment": " for water (O:0, H:1) it can be",
- "_comment": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]",
- "_comment": " that's all"
+ "_comment2": " default rule: []",
+ "_comment3": " user defined rule: for each type provides two axes, ",
+ "_comment4": " for each axis: (a_or_r, type, idx)",
+ "_comment5": " if type < 0, exclude type -(type+1)",
+ "_comment6": " for water (O:0, H:1) it can be",
+ "_comment7": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]",
+ "_comment8": " that's all"
},
"fitting_net": {
"type": "wfc",
@@ -52,9 +52,9 @@
],
"resnet_dt": true,
"seed": 1,
- "_comment": " that's all"
+ "_comment9": " that's all"
},
- "_comment": " that's all"
+ "_comment10": " that's all"
},
"learning_rate": {
@@ -62,10 +62,10 @@
"start_lr": 0.001,
"decay_steps": 5000,
"decay_rate": 0.95,
- "_comment": "that's all"
+ "_comment11": "that's all"
},
- "_comment": " traing controls",
+ "_comment12": " traing controls",
"systems": [
"system"
],
@@ -77,8 +77,8 @@
"seed": 1,
- "_comment": " display and restart",
- "_comment": " frequencies counted in batch",
+ "_comment13": " display and restart",
+ "_comment14": " frequencies counted in batch",
"disp_file": "lcurve.out",
"disp_freq": 100,
"numb_test": 10,
@@ -88,5 +88,5 @@
"disp_training": true,
"time_training": true,
- "_comment": "that's all"
+ "_comment15": "that's all"
}
diff --git a/source/tests/zinc_se_a_mask.json b/source/tests/zinc_se_a_mask.json
index 66aa85b637..bf8c3760c4 100644
--- a/source/tests/zinc_se_a_mask.json
+++ b/source/tests/zinc_se_a_mask.json
@@ -1,5 +1,5 @@
{
- "_comment": "that's all",
+ "_comment1": "that's all",
"model": {
"type_map": [
"C",
@@ -20,7 +20,7 @@
"resnet_dt": true,
"axis_neuron": 2,
"seed": 1,
- "_comment": " that's all"
+ "_comment2": " that's all"
},
"fitting_net": {
"neuron": [
@@ -32,16 +32,16 @@
"seed": 1,
"numb_aparam": 1,
"use_aparam_as_mask": true,
- "_comment": " that's all"
+ "_comment3": " that's all"
},
- "_comment": " that's all"
+ "_comment4": " that's all"
},
"learning_rate": {
"type": "exp",
"decay_steps": 20000,
"start_lr": 0.001,
"stop_lr": 3.51e-08,
- "_comment": "that's all"
+ "_comment5": "that's all"
},
"loss": {
"type": "ener",
@@ -51,13 +51,13 @@
"limit_pref_f": 0.0,
"start_pref_pf": 1.0,
"limit_pref_pf": 1.0,
- "_comment": " that's all"
+ "_comment6": " that's all"
},
"training": {
"training_data": {
"systems": "source/tests/data_dp_mask/",
"batch_size": 4,
- "_comment": "that's all"
+ "_comment7": "that's all"
},
"validation_data": {
"systems": [
@@ -65,13 +65,13 @@
],
"batch_size": 4,
"numb_btch": 1,
- "_comment": "that's all"
+ "_comment8": "that's all"
},
"numb_steps": 1,
"seed": 10,
"tensorboard": true,
"tensorboard_log_dir": "log4tensorboard",
"tensorboard_freq": 100,
- "_comment": "that's all"
+ "_comment9": "that's all"
}
}