
Merge branch 'devel' of https://github.com/deepmodeling/deepmd-kit into mapping

CaRoLZhangxy committed Mar 21, 2024
2 parents 49f0204 + 5aa1b89 commit d8cf4c7
Showing 183 changed files with 3,437 additions and 1,210 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build_wheel.yml
@@ -90,7 +90,7 @@ jobs:
rm -rf .git
if: matrix.dp_pkg_name == 'deepmd-kit-cu11'
- name: Build wheels
uses: pypa/cibuildwheel@v2.16
uses: pypa/cibuildwheel@v2.17
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_ARCHS: all
2 changes: 1 addition & 1 deletion .github/workflows/package_c.yml
@@ -42,7 +42,7 @@ jobs:
- name: Test C library
run: ./source/install/docker_test_package_c.sh
- name: Release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: ${{ matrix.filename }}
4 changes: 4 additions & 0 deletions .github/workflows/test_cc.yml
@@ -31,6 +31,10 @@ jobs:
run: |
wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.1.2%2Bcpu.zip -O libtorch.zip
unzip libtorch.zip
# https://github.com/actions/runner-images/issues/9491
- name: Fix kernel mmap rnd bits
run: sudo sysctl vm.mmap_rnd_bits=28
if: ${{ matrix.check_memleak }}
- run: |
export CMAKE_PREFIX_PATH=$GITHUB_WORKSPACE/libtorch
source/install/test_cc_local.sh
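The "Fix kernel mmap rnd bits" step works around an AddressSanitizer failure on recent GitHub Ubuntu runner images, whose kernels ship with higher ASLR entropy (vm.mmap_rnd_bits=32) than ASan tolerates; pinning the value to 28 is the workaround tracked in the linked runner-images issue, and it only runs when the memory-leak matrix entry is enabled. As a small, hedged illustration (not part of the workflow), a Python check one could run on a Linux runner to confirm the setting before the ASan-instrumented tests:

    from pathlib import Path

    def mmap_rnd_bits() -> int:
        """Read the kernel's current mmap ASLR entropy in bits (Linux only)."""
        return int(Path("/proc/sys/vm/mmap_rnd_bits").read_text().strip())

    if __name__ == "__main__":
        bits = mmap_rnd_bits()
        print(f"vm.mmap_rnd_bits = {bits}")
        # ASan-instrumented binaries are known to abort when this value is
        # too high, hence the `sudo sysctl vm.mmap_rnd_bits=28` step above.
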
20 changes: 20 additions & 0 deletions .github/workflows/todo.yml
@@ -0,0 +1,20 @@
name: TODO workflow
on:
push:
branches:
- devel
jobs:
build:
if: github.repository_owner == 'deepmodeling'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run tdg-github-action
uses: ribtoks/tdg-github-action@master
with:
TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
SHA: ${{ github.sha }}
REF: ${{ github.ref }}
EXCLUDE_PATTERN: "(source/3rdparty|.git)/.*"
COMMENT_ON_ISSUES: 1
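
The new workflow wires ribtoks/tdg-github-action into pushes to devel: the action scans the checked-out tree for TODO-style comments (excluding source/3rdparty and .git via EXCLUDE_PATTERN) and opens GitHub issues for them; the token, repository, SHA, and ref inputs give it the context it needs. As a rough illustration only (the exact comment grammar is defined by the action, not by this workflow), a comment such as the one this very commit adds to deepmd/common.py is the kind of marker it would pick up:

    # TODO: refactor data_requirement to make it not a global variable
    # Comments of this form in tracked source files are turned into GitHub
    # issues by the TODO workflow on pushes to devel.
    data_requirement = {}
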
5 changes: 2 additions & 3 deletions .pre-commit-config.yaml
@@ -12,7 +12,6 @@ repos:
- id: check-json
- id: check-added-large-files
args: ['--maxkb=1024', '--enforce-all']
# TODO: remove the following after resolved
exclude: |
(?x)^(
source/tests/infer/dipolecharge_e.pbtxt|
@@ -30,7 +29,7 @@ repos:
exclude: ^source/3rdparty
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.2.2
rev: v0.3.3
hooks:
- id: ruff
args: ["--fix"]
@@ -53,7 +52,7 @@ repos:
- id: blacken-docs
# C++
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v17.0.6
rev: v18.1.1
hooks:
- id: clang-format
exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc
1 change: 1 addition & 0 deletions backend/dp_backend.py
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
"""A PEP-517 backend to find TensorFlow."""

from typing import (
List,
)
4 changes: 3 additions & 1 deletion backend/find_tensorflow.py
@@ -83,6 +83,7 @@ def find_tensorflow() -> Tuple[Optional[str], List[str]]:
# TypeError if submodule_search_locations are None
# IndexError if submodule_search_locations is an empty list
except (AttributeError, TypeError, IndexError):
tf_version = ""
if os.environ.get("CIBUILDWHEEL", "0") == "1":
cuda_version = os.environ.get("CUDA_VERSION", "12.2")
if cuda_version == "" or cuda_version in SpecifierSet(">=12,<13"):
@@ -99,9 +100,10 @@ def find_tensorflow() -> Tuple[Optional[str], List[str]]:
"tensorflow-cpu>=2.5.0rc0,<2.15; platform_machine=='x86_64' and platform_system == 'Linux'",
]
)
tf_version = "2.14.1"
else:
raise RuntimeError("Unsupported CUDA version")
requires.extend(get_tf_requirement()["cpu"])
requires.extend(get_tf_requirement(tf_version)["cpu"])
# setuptools will re-find tensorflow after installing setup_requires
tf_install_dir = None
return tf_install_dir, requires
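The find_tensorflow change gives tf_version a defined value on every path (the empty string by default, "2.14.1" for the CUDA 11 wheel build) and passes it to get_tf_requirement, so the requirement generated for cibuildwheel matches the TensorFlow version the wheel is actually built against. A minimal sketch of the idea, assuming only what the diff shows (get_tf_requirement takes an optional version string and returns a dict with a "cpu" key); this is not the real deepmd implementation:

    from typing import Dict, List

    def get_tf_requirement_sketch(tf_version: str = "") -> Dict[str, List[str]]:
        """Hypothetical, simplified stand-in for deepmd's get_tf_requirement."""
        if tf_version == "":
            # no version decided yet: keep the requirement open-ended
            return {"cpu": ["tensorflow-cpu>=2.5.0"]}
        # a concrete version (e.g. the "2.14.1" pinned above) pins the requirement
        return {"cpu": [f"tensorflow-cpu=={tf_version}"]}

    # e.g. requires.extend(get_tf_requirement_sketch("2.14.1")["cpu"])
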
1 change: 1 addition & 0 deletions deepmd/__init__.py
@@ -7,6 +7,7 @@
The top module (deepmd.__init__) should not import any third-party
modules for performance.
"""

try:
from deepmd._version import version as __version__
except ImportError:
1 change: 1 addition & 0 deletions deepmd/backend/__init__.py
@@ -3,6 +3,7 @@
Avoid directly importing third-party libraries in this module for performance.
"""

# copy from dpdata
from importlib import (
import_module,
14 changes: 8 additions & 6 deletions deepmd/common.py
@@ -71,8 +71,9 @@
)


# TODO this is not a good way to do things. This is some global variable to which
# TODO anyone can write and there is no good way to keep track of the changes
# TODO: refactor data_requirement to make it not a global variable
# this is not a good way to do things. This is some global variable to which
# anyone can write and there is no good way to keep track of the changes
data_requirement = {}


@@ -180,9 +181,10 @@ def make_default_mesh(pbc: bool, mixed_type: bool) -> np.ndarray:
return default_mesh


# TODO maybe rename this to j_deprecated and only warn about deprecated keys,
# TODO if the deprecated_key argument is left empty function puppose is only custom
# TODO error since dict[key] already raises KeyError when the key is missing
# TODO: rename j_must_have to j_deprecated and only warn about deprecated keys
# maybe rename this to j_deprecated and only warn about deprecated keys;
# if the deprecated_key argument is left empty, the function's only purpose is a custom
# error, since dict[key] already raises KeyError when the key is missing
def j_must_have(
jdata: Dict[str, "_DICT_VAL"], key: str, deprecated_key: List[str] = []
) -> "_DICT_VAL":
@@ -238,7 +240,7 @@ def j_loader(filename: Union[str, Path]) -> Dict[str, Any]:
raise TypeError("config file must be json, or yaml/yml")


# TODO port completely to pathlib when all callers are ported
# TODO port expand_sys_str completely to pathlib when all callers are ported
def expand_sys_str(root_dir: Union[str, Path]) -> List[str]:
"""Recursively iterate over directories taking those that contain `type.raw` file.
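The reworded TODO above proposes renaming j_must_have to j_deprecated, so that the helper only warns about deprecated keys and otherwise relies on plain dict indexing (which already raises KeyError for a missing key). A minimal sketch of what such a helper might look like; j_deprecated does not exist in the codebase, and the name and behavior are taken only from the TODO:

    import warnings
    from typing import Any, Dict, List, Optional

    def j_deprecated(
        jdata: Dict[str, Any], key: str, deprecated_key: Optional[List[str]] = None
    ) -> Any:
        """Hypothetical replacement for j_must_have sketched from the TODO above."""
        for dk in deprecated_key or []:
            if dk in jdata:
                warnings.warn(f"the key {dk!r} is deprecated, please use {key!r} instead")
                return jdata[dk]
        # dict indexing already raises KeyError when the key is missing,
        # so no custom error is needed here.
        return jdata[key]
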
1 change: 0 additions & 1 deletion deepmd/dpmodel/atomic_model/__init__.py
@@ -14,7 +14,6 @@
"""


from .base_atomic_model import (
BaseAtomicModel,
)
82 changes: 57 additions & 25 deletions deepmd/dpmodel/atomic_model/base_atomic_model.py
@@ -56,22 +56,19 @@ def reinit_pair_exclude(

def atomic_output_def(self) -> FittingOutputDef:
old_def = self.fitting_output_def()
if self.atom_excl is None:
return old_def
else:
old_list = list(old_def.get_data().values())
return FittingOutputDef(
old_list # noqa:RUF005
+ [
OutputVariableDef(
name="mask",
shape=[1],
reduciable=False,
r_differentiable=False,
c_differentiable=False,
)
]
)
old_list = list(old_def.get_data().values())
return FittingOutputDef(
old_list # noqa:RUF005
+ [
OutputVariableDef(
name="mask",
shape=[1],
reduciable=False,
r_differentiable=False,
c_differentiable=False,
)
]
)

def forward_common_atomic(
self,
@@ -82,31 +79,66 @@ def forward_common_atomic(
fparam: Optional[np.ndarray] = None,
aparam: Optional[np.ndarray] = None,
) -> Dict[str, np.ndarray]:
"""Common interface for atomic inference.
This method accept extended coordinates, extended atom typs, neighbor list,
and predict the atomic contribution of the fit property.
Parameters
----------
extended_coord
extended coodinates, shape: nf x (nall x 3)
extended_atype
extended atom typs, shape: nf x nall
for a type < 0 indicating the atomic is virtual.
nlist
neighbor list, shape: nf x nloc x nsel
mapping
extended to local index mapping, shape: nf x nall
fparam
frame parameters, shape: nf x dim_fparam
aparam
atomic parameter, shape: nf x nloc x dim_aparam
Returns
-------
ret_dict
dict of output atomic properties.
should implement the definition of `fitting_output_def`.
ret_dict["mask"] of shape nf x nloc will be provided.
ret_dict["mask"][ff,ii] == 1 indicating the ii-th atom of the ff-th frame is real.
ret_dict["mask"][ff,ii] == 0 indicating the ii-th atom of the ff-th frame is virtual.
"""
_, nloc, _ = nlist.shape
atype = extended_atype[:, :nloc]
if self.pair_excl is not None:
pair_mask = self.pair_excl.build_type_exclude_mask(nlist, extended_atype)
# exclude neighbors in the nlist
nlist = np.where(pair_mask == 1, nlist, -1)

ext_atom_mask = self.make_atom_mask(extended_atype)
ret_dict = self.forward_atomic(
extended_coord,
extended_atype,
np.where(ext_atom_mask, extended_atype, 0),
nlist,
mapping=mapping,
fparam=fparam,
aparam=aparam,
)

# nf x nloc
atom_mask = ext_atom_mask[:, :nloc].astype(np.int32)
if self.atom_excl is not None:
atom_mask = self.atom_excl.build_type_exclude_mask(atype)
for kk in ret_dict.keys():
out_shape = ret_dict[kk].shape
ret_dict[kk] = (
ret_dict[kk].reshape([out_shape[0], out_shape[1], -1])
* atom_mask[:, :, None]
).reshape(out_shape)
ret_dict["mask"] = atom_mask
atom_mask *= self.atom_excl.build_type_exclude_mask(atype)

for kk in ret_dict.keys():
out_shape = ret_dict[kk].shape
ret_dict[kk] = (
ret_dict[kk].reshape([out_shape[0], out_shape[1], -1])
* atom_mask[:, :, None]
).reshape(out_shape)
ret_dict["mask"] = atom_mask

return ret_dict

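As the new docstring spells out, forward_common_atomic now always returns a "mask" entry and zeroes the per-atom outputs of excluded or virtual atoms before returning them. A hedged usage sketch, assuming only the signature and mask semantics shown above (any concrete dpmodel atomic model instance would play the role of `model`):

    from typing import Dict, Optional

    import numpy as np

    def count_real_atoms(
        model,
        extended_coord: np.ndarray,   # (nf, nall * 3)
        extended_atype: np.ndarray,   # (nf, nall); type < 0 marks virtual atoms
        nlist: np.ndarray,            # (nf, nloc, nsel)
        mapping: Optional[np.ndarray] = None,  # (nf, nall)
    ) -> np.ndarray:
        """Run atomic inference and count the real (non-virtual) atoms per frame."""
        ret: Dict[str, np.ndarray] = model.forward_common_atomic(
            extended_coord, extended_atype, nlist, mapping=mapping
        )
        # Per-atom outputs are already multiplied by the atom mask, and
        # ret["mask"] records which of the nloc local atoms are real.
        return ret["mask"].sum(axis=1)
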
1 change: 1 addition & 0 deletions deepmd/dpmodel/atomic_model/dp_atomic_model.py
@@ -26,6 +26,7 @@
)


@BaseAtomicModel.register("standard")
class DPAtomicModel(BaseAtomicModel):
"""Model give atomic prediction of some physical property.
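The new @BaseAtomicModel.register("standard") decorator feeds the same lookup that the rewritten LinearEnergyAtomicModel.deserialize (below) uses via BaseAtomicModel.get_class_by_type(model["type"]), replacing the old name lookup in sys.modules. A generic, self-contained sketch of that registry pattern, offered as an assumption about how the plugin machinery behaves rather than a copy of deepmd's implementation:

    from typing import Callable, Dict, Type

    class Registry:
        """Toy stand-in for the register / get_class_by_type plugin pattern."""

        _plugins: Dict[str, Type] = {}

        @classmethod
        def register(cls, key: str) -> Callable[[Type], Type]:
            def decorator(plugin: Type) -> Type:
                cls._plugins[key] = plugin
                return plugin

            return decorator

        @classmethod
        def get_class_by_type(cls, key: str) -> Type:
            return cls._plugins[key]

    @Registry.register("standard")
    class ToyStandardModel:
        pass

    assert Registry.get_class_by_type("standard") is ToyStandardModel
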
49 changes: 21 additions & 28 deletions deepmd/dpmodel/atomic_model/linear_atomic_model.py
@@ -1,9 +1,5 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
import sys
from abc import (
abstractmethod,
)
from typing import (
Dict,
List,
@@ -225,40 +221,38 @@ def fitting_output_def(self) -> FittingOutputDef:
]
)

@staticmethod
def serialize(models, type_map) -> dict:
def serialize(self) -> dict:
return {
"@class": "Model",
"type": "linear",
"@version": 1,
"models": [model.serialize() for model in models],
"model_name": [model.__class__.__name__ for model in models],
"type_map": type_map,
"models": [model.serialize() for model in self.models],
"type_map": self.type_map,
}

@staticmethod
def deserialize(data) -> Tuple[List[BaseAtomicModel], List[str]]:
@classmethod
def deserialize(cls, data: dict) -> "LinearEnergyAtomicModel":
data = copy.deepcopy(data)
check_version_compatibility(data.pop("@version", 1), 1, 1)
data.pop("@class")
data.pop("type")
model_names = data["model_name"]
type_map = data["type_map"]
type_map = data.pop("type_map")
models = [
getattr(sys.modules[__name__], name).deserialize(model)
for name, model in zip(model_names, data["models"])
BaseAtomicModel.get_class_by_type(model["type"]).deserialize(model)
for model in data["models"]
]
return models, type_map
data.pop("models")
return cls(models, type_map, **data)

@abstractmethod
def _compute_weight(
self,
extended_coord: np.ndarray,
extended_atype: np.ndarray,
nlists_: List[np.ndarray],
) -> np.ndarray:
) -> List[np.ndarray]:
"""This should be a list of user defined weights that matches the number of models to be combined."""
raise NotImplementedError
nmodels = len(self.models)
return [np.ones(1) / nmodels for _ in range(nmodels)]

def get_dim_fparam(self) -> int:
"""Get the number (dimension) of frame parameters of this atomic model."""
@@ -335,10 +329,10 @@ def serialize(self) -> dict:
{
"@class": "Model",
"type": "zbl",
"@version": 1,
"models": LinearEnergyAtomicModel.serialize(
[self.dp_model, self.zbl_model], self.type_map
),
"@version": 2,
"models": LinearEnergyAtomicModel(
models=[self.models[0], self.models[1]], type_map=self.type_map
).serialize(),
"sw_rmin": self.sw_rmin,
"sw_rmax": self.sw_rmax,
"smin_alpha": self.smin_alpha,
Expand All @@ -349,16 +343,15 @@ def serialize(self) -> dict:
@classmethod
def deserialize(cls, data) -> "DPZBLLinearEnergyAtomicModel":
data = copy.deepcopy(data)
check_version_compatibility(data.pop("@version", 1), 1, 1)
check_version_compatibility(data.pop("@version", 1), 2, 1)
data.pop("@class")
data.pop("type")
sw_rmin = data.pop("sw_rmin")
sw_rmax = data.pop("sw_rmax")
smin_alpha = data.pop("smin_alpha")

([dp_model, zbl_model], type_map) = LinearEnergyAtomicModel.deserialize(
data.pop("models")
)
linear_model = LinearEnergyAtomicModel.deserialize(data.pop("models"))
dp_model, zbl_model = linear_model.models
type_map = linear_model.type_map

return cls(
dp_model=dp_model,
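With serialize now an instance method and deserialize a classmethod that rebuilds each sub-model from its own "type" field, a linear model round-trips through a plain dict without the caller tracking the model list and type map separately (the ZBL wrapper reuses exactly this and bumps its own format to @version 2). A hedged round-trip sketch, assuming model_a and model_b are two already-constructed atomic models and following only the constructor call visible in deserialize above:

    from deepmd.dpmodel.atomic_model.linear_atomic_model import (
        LinearEnergyAtomicModel,
    )

    def roundtrip(model_a, model_b, type_map):
        model = LinearEnergyAtomicModel([model_a, model_b], type_map)
        data = model.serialize()  # instance method: no arguments anymore
        assert data["type"] == "linear"
        # deserialize resolves each sub-model class from model["type"] via
        # BaseAtomicModel.get_class_by_type, so no separate model_name list
        # is stored or needed.
        return LinearEnergyAtomicModel.deserialize(data)
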
(Diffs for the remaining changed files are not shown.)