From 644cc52b9709b99c0fa19a94da8beb542947e39d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 1 Mar 2024 21:09:26 -0500 Subject: [PATCH 01/30] tf: remove freeze warning for optional nodes (#3381) Fix #3334. --------- Signed-off-by: Jinzhe Zeng (cherry picked from commit 92ee6326aecec689882d49729f09b791309f9064) --- deepmd/entrypoints/freeze.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/deepmd/entrypoints/freeze.py b/deepmd/entrypoints/freeze.py index 543e89b4ab..f13ea85ab0 100755 --- a/deepmd/entrypoints/freeze.py +++ b/deepmd/entrypoints/freeze.py @@ -357,13 +357,21 @@ def freeze_graph( output_node = _make_node_names( freeze_type, modifier, out_suffix=out_suffix, node_names=node_names ) + # see #3334 + optional_node = [ + "train_attr/min_nbor_dist", + "fitting_attr/aparam_nall", + "spin_attr/ntypes_spin", + ] different_set = set(output_node) - set(input_node) if different_set: - log.warning( - "The following nodes are not in the graph: %s. " - "Skip freezeing these nodes. You may be freezing " - "a checkpoint generated by an old version." % different_set - ) + different_set -= set(optional_node) + if different_set: + log.warning( + "The following nodes are not in the graph: %s. " + "Skip freezeing these nodes. You may be freezing " + "a checkpoint generated by an old version." % different_set + ) # use intersection as output list output_node = list(set(output_node) & set(input_node)) log.info(f"The following nodes will be frozen: {output_node}") From dbf5d4a2bed882cfaa4c5581d5bd4354aeba1fec Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 2 Apr 2024 13:22:48 -0400 Subject: [PATCH 02/30] fix: set rpath for libtorch and protobuf (#3636) Fix #3568. Set run path for libtorch and protobuf. Refactor the original `TensorFlow_LIBRARY_PATH` to `BACKEND_LIBRARY_PATH` (list). 
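As a hedged aside (not part of this patch's diff): a CMake *list* slots cleanly into the rpath properties below because CMake lists are semicolon-separated, which is exactly the separator `INSTALL_RPATH` expects, so every backend's library directory can be appended to one variable:

```cmake
# Sketch only. BACKEND_LIBRARY_PATH accumulates one directory per enabled
# backend; TORCH_LIBRARY_PATH here is a hypothetical stand-in for wherever
# libtorch is located.
list(APPEND BACKEND_LIBRARY_PATH ${TensorFlow_LIBRARY_PATH})
list(APPEND BACKEND_LIBRARY_PATH ${TORCH_LIBRARY_PATH})
# The quoted list expands to "$ORIGIN;<tf dir>;<torch dir>", a valid rpath.
set_target_properties(mylib PROPERTIES INSTALL_RPATH
                      "$ORIGIN;${BACKEND_LIBRARY_PATH}")
```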
--------- Signed-off-by: Jinzhe Zeng (cherry picked from commit 4c546d0a167afb7aba400324a219bef553e0528b) Signed-off-by: Jinzhe Zeng --- source/CMakeLists.txt | 4 ++++ source/api_c/CMakeLists.txt | 4 ++-- source/api_c/tests/CMakeLists.txt | 2 +- source/api_cc/CMakeLists.txt | 6 ++++-- source/gmx/CMakeLists.txt | 2 +- source/ipi/CMakeLists.txt | 4 ++-- source/lmp/env.sh.in | 4 ++-- source/lmp/env_c.sh.in | 4 ++-- source/lmp/env_py.sh.in | 4 ++-- source/lmp/env_py_c.sh.in | 4 ++-- source/lmp/plugin/CMakeLists.txt | 2 +- source/op/CMakeLists.txt | 5 ++--- 12 files changed, 25 insertions(+), 20 deletions(-) diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index f7bef0b2d9..cbc73735fe 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -145,6 +145,8 @@ endif(USE_TF_PYTHON_LIBS) # find tensorflow, I need tf abi info if(NOT DEEPMD_C_ROOT) find_package(tensorflow REQUIRED) + list(APPEND BACKEND_LIBRARY_PATH ${TensorFlow_LIBRARY_PATH}) + list(APPEND BACKEND_INCLUDE_DIRS ${TENSORFLOW_INCLUDE_DIRS}) endif() # find threads @@ -233,7 +235,9 @@ if(DEEPMD_C_ROOT) # use variable for TF path to set deepmd_c path set(TENSORFLOW_ROOT "${DEEPMD_C_ROOT}") set(TensorFlow_LIBRARY_PATH "${DEEPMD_C_ROOT}/lib") + set(BACKEND_LIBRARY_PATH "${DEEPMD_C_ROOT}/lib") set(TENSORFLOW_INCLUDE_DIRS "${DEEPMD_C_ROOT}/include") + set(BACKEND_INCLUDE_DIRS "${DEEPMD_C_ROOT}/include") endif() if(NOT DEEPMD_C_ROOT) diff --git a/source/api_c/CMakeLists.txt b/source/api_c/CMakeLists.txt index f6e741105a..f0b36a62d7 100644 --- a/source/api_c/CMakeLists.txt +++ b/source/api_c/CMakeLists.txt @@ -12,8 +12,8 @@ target_include_directories( ${libname} PUBLIC $ $) -set_target_properties( - ${libname} PROPERTIES INSTALL_RPATH "$ORIGIN;${TensorFlow_LIBRARY_PATH}") +set_target_properties(${libname} PROPERTIES INSTALL_RPATH + "$ORIGIN;${BACKEND_LIBRARY_PATH}") if(CMAKE_TESTING_ENABLED) target_link_libraries(${libname} PRIVATE coverage_config) endif() diff --git a/source/api_c/tests/CMakeLists.txt b/source/api_c/tests/CMakeLists.txt index d4233a83e8..1b035b1a6c 100644 --- a/source/api_c/tests/CMakeLists.txt +++ b/source/api_c/tests/CMakeLists.txt @@ -4,7 +4,7 @@ project(deepmd_api_c_test) file(GLOB TEST_SRC test_*.cc) set_target_properties( - ${LIB_DEEPMD_C} PROPERTIES INSTALL_RPATH "$ORIGIN;${TensorFlow_LIBRARY_PATH}") + ${LIB_DEEPMD_C} PROPERTIES INSTALL_RPATH "$ORIGIN;${BACKEND_LIBRARY_PATH}") add_executable(runUnitTests_c ${TEST_SRC}) target_link_libraries(runUnitTests_c PRIVATE GTest::gtest_main ${LIB_DEEPMD_C} diff --git a/source/api_cc/CMakeLists.txt b/source/api_cc/CMakeLists.txt index 2f296e3dfd..15d3844b36 100644 --- a/source/api_cc/CMakeLists.txt +++ b/source/api_cc/CMakeLists.txt @@ -25,8 +25,10 @@ if(Protobuf_LIBRARY) endif() set_target_properties( - ${libname} PROPERTIES INSTALL_RPATH "$ORIGIN;${TensorFlow_LIBRARY_PATH}" - BUILD_RPATH "$ORIGIN/../op") + ${libname} + PROPERTIES INSTALL_RPATH "$ORIGIN;${BACKEND_LIBRARY_PATH}" + INSTALL_RPATH_USE_LINK_PATH TRUE + BUILD_RPATH "$ORIGIN/../op") target_compile_definitions(${libname} PRIVATE TF_PRIVATE) if(CMAKE_TESTING_ENABLED) target_link_libraries(${libname} PRIVATE coverage_config) diff --git a/source/gmx/CMakeLists.txt b/source/gmx/CMakeLists.txt index d445479d39..8fde1e6ab8 100644 --- a/source/gmx/CMakeLists.txt +++ b/source/gmx/CMakeLists.txt @@ -26,7 +26,7 @@ target_include_directories(${libgmxname} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../3rdparty/) set_target_properties( - ${libgmxname} PROPERTIES INSTALL_RPATH "$ORIGIN;${TensorFlow_LIBRARY_PATH}") + 
${libgmxname} PROPERTIES INSTALL_RPATH "$ORIGIN;${BACKEND_LIBRARY_PATH}") install( FILES dp_gmx_patch diff --git a/source/ipi/CMakeLists.txt b/source/ipi/CMakeLists.txt index 158f98aea5..2b23a63c76 100644 --- a/source/ipi/CMakeLists.txt +++ b/source/ipi/CMakeLists.txt @@ -44,13 +44,13 @@ if(APPLE) ${ipiname} PROPERTIES LINK_FLAGS "${extra_link_flags}" INSTALL_RPATH - "@loader_path/../${LIB_DIR}:${TensorFlow_LIBRARY_PATH}") + "@loader_path/../${LIB_DIR};${BACKEND_LIBRARY_PATH}") else() set_target_properties( ${ipiname} PROPERTIES LINK_FLAGS "-Wl,-rpath,'$ORIGIN'/../${LIB_DIR} ${extra_link_flags}" - INSTALL_RPATH "$ORIGIN/../${LIB_DIR}:${TensorFlow_LIBRARY_PATH}") + INSTALL_RPATH "$ORIGIN/../${LIB_DIR};${BACKEND_LIBRARY_PATH}") endif() if(CMAKE_TESTING_ENABLED) diff --git a/source/lmp/env.sh.in b/source/lmp/env.sh.in index 8c3a7aedc1..55b90be886 100644 --- a/source/lmp/env.sh.in +++ b/source/lmp/env.sh.in @@ -1,6 +1,6 @@ DEEPMD_ROOT=@CMAKE_INSTALL_PREFIX@ -TENSORFLOW_INCLUDE_DIRS="@TensorFlow_INCLUDE_DIRS@" -TENSORFLOW_LIBRARY_PATH="@TensorFlow_LIBRARY_PATH@" +TENSORFLOW_INCLUDE_DIRS="@BACKEND_INCLUDE_DIRS@" +TENSORFLOW_LIBRARY_PATH="@BACKEND_LIBRARY_PATH@" TF_INCLUDE_DIRS=`echo $TENSORFLOW_INCLUDE_DIRS | sed "s/;/ -I/g"` TF_LIBRARY_PATH=`echo $TENSORFLOW_LIBRARY_PATH | sed "s/;/ -L/g"` diff --git a/source/lmp/env_c.sh.in b/source/lmp/env_c.sh.in index 67f6a63a1c..af010497ba 100644 --- a/source/lmp/env_c.sh.in +++ b/source/lmp/env_c.sh.in @@ -1,6 +1,6 @@ DEEPMD_ROOT=@CMAKE_INSTALL_PREFIX@ -TENSORFLOW_INCLUDE_DIRS="@TensorFlow_INCLUDE_DIRS@" -TENSORFLOW_LIBRARY_PATH="@TensorFlow_LIBRARY_PATH@" +TENSORFLOW_INCLUDE_DIRS="@BACKEND_INCLUDE_DIRS@" +TENSORFLOW_LIBRARY_PATH="@BACKEND_LIBRARY_PATH@" TF_INCLUDE_DIRS=`echo $TENSORFLOW_INCLUDE_DIRS | sed "s/;/ -I/g"` TF_LIBRARY_PATH=`echo $TENSORFLOW_LIBRARY_PATH | sed "s/;/ -L/g"` diff --git a/source/lmp/env_py.sh.in b/source/lmp/env_py.sh.in index e6fc217c26..be48191f33 100644 --- a/source/lmp/env_py.sh.in +++ b/source/lmp/env_py.sh.in @@ -1,6 +1,6 @@ DEEPMD_ROOT=@CMAKE_INSTALL_PREFIX@ -TENSORFLOW_INCLUDE_DIRS="@TensorFlow_INCLUDE_DIRS@" -TENSORFLOW_LIBRARY_PATH="@TensorFlow_LIBRARY_PATH@" +TENSORFLOW_INCLUDE_DIRS="@BACKEND_INCLUDE_DIRS@" +TENSORFLOW_LIBRARY_PATH="@BACKEND_LIBRARY_PATH@" PYTHON_LIBRARY_PATH="@Python_LIBRARY_DIRS@" TF_INCLUDE_DIRS=`echo $TENSORFLOW_INCLUDE_DIRS | sed "s/;/ -I/g"` diff --git a/source/lmp/env_py_c.sh.in b/source/lmp/env_py_c.sh.in index b8affd7ff5..34874cc24c 100644 --- a/source/lmp/env_py_c.sh.in +++ b/source/lmp/env_py_c.sh.in @@ -1,6 +1,6 @@ DEEPMD_ROOT=@CMAKE_INSTALL_PREFIX@ -TENSORFLOW_INCLUDE_DIRS="@TensorFlow_INCLUDE_DIRS@" -TENSORFLOW_LIBRARY_PATH="@TensorFlow_LIBRARY_PATH@" +TENSORFLOW_INCLUDE_DIRS="@BACKEND_INCLUDE_DIRS@" +TENSORFLOW_LIBRARY_PATH="@BACKEND_LIBRARY_PATH@" PYTHON_LIBRARY_PATH="@Python_LIBRARY_DIRS@" TF_INCLUDE_DIRS=`echo $TENSORFLOW_INCLUDE_DIRS | sed "s/;/ -I/g"` diff --git a/source/lmp/plugin/CMakeLists.txt b/source/lmp/plugin/CMakeLists.txt index 4fdae7ac5b..f912059261 100644 --- a/source/lmp/plugin/CMakeLists.txt +++ b/source/lmp/plugin/CMakeLists.txt @@ -99,7 +99,7 @@ if(DEFINED LAMMPS_SOURCE_ROOT OR DEFINED LAMMPS_VERSION) "-Wl,-undefined,dynamic_lookup") else() set_target_properties( - ${libname} PROPERTIES INSTALL_RPATH "$ORIGIN;${TensorFlow_LIBRARY_PATH}" + ${libname} PROPERTIES INSTALL_RPATH "$ORIGIN;${BACKEND_LIBRARY_PATH}" LINK_FLAGS "-rdynamic") endif() target_compile_definitions( diff --git a/source/op/CMakeLists.txt b/source/op/CMakeLists.txt index 7a92e259e0..6fc6422c6e 100644 --- 
a/source/op/CMakeLists.txt +++ b/source/op/CMakeLists.txt @@ -62,11 +62,10 @@ endif() if(APPLE) set_target_properties( ${LIB_DEEPMD_OP} PROPERTIES INSTALL_RPATH - "@loader_path;${TensorFlow_LIBRARY_PATH}") + "@loader_path;${BACKEND_LIBRARY_PATH}") else() set_target_properties( - ${LIB_DEEPMD_OP} PROPERTIES INSTALL_RPATH - "$ORIGIN;${TensorFlow_LIBRARY_PATH}") + ${LIB_DEEPMD_OP} PROPERTIES INSTALL_RPATH "$ORIGIN;${BACKEND_LIBRARY_PATH}") endif() if(CMAKE_TESTING_ENABLED) target_link_libraries(${LIB_DEEPMD_OP} PRIVATE coverage_config) From 478fbf3a40eab1603fec4901f4ff77b0e151fab1 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 6 Apr 2024 23:08:04 -0400 Subject: [PATCH 03/30] feat: apply descriptor exclude_types to env mat stat (#3625) Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit 87d293a55d6815758b2071f3983b87d7c8587c72) Signed-off-by: Jinzhe Zeng --- deepmd/descriptor/descriptor.py | 3 ++- deepmd/descriptor/se_a.py | 12 ++++++++++++ deepmd/descriptor/se_atten.py | 20 +++++++++++++++++--- deepmd/descriptor/se_r.py | 12 ++++++++++++ 4 files changed, 43 insertions(+), 4 deletions(-) diff --git a/deepmd/descriptor/descriptor.py b/deepmd/descriptor/descriptor.py index bd731004cb..70c6cb2758 100644 --- a/deepmd/descriptor/descriptor.py +++ b/deepmd/descriptor/descriptor.py @@ -8,6 +8,7 @@ Dict, List, Optional, + Set, Tuple, ) @@ -393,7 +394,7 @@ def pass_tensors_from_frz_model( def build_type_exclude_mask( self, - exclude_types: List[Tuple[int, int]], + exclude_types: Set[Tuple[int, int]], ntypes: int, sel: List[int], ndescrpt: int, diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index 3a924f796a..ddbf9a34e5 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -274,6 +274,18 @@ def __init__( sel_a=self.sel_a, sel_r=self.sel_r, ) + if len(self.exclude_types): + # exclude types applied to data stat + mask = self.build_type_exclude_mask( + self.exclude_types, + self.ntypes, + self.sel_a, + self.ndescrpt, + # for data stat, nloc == nall + self.place_holders["type"], + tf.size(self.place_holders["type"]), + ) + self.stat_descrpt *= tf.reshape(mask, tf.shape(self.stat_descrpt)) self.sub_sess = tf.Session(graph=sub_graph, config=default_tf_session_config) self.original_sel = None self.multi_task = multi_task diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index 8c1a179923..1dfe138ff7 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -4,6 +4,7 @@ from typing import ( List, Optional, + Set, Tuple, ) @@ -250,6 +251,19 @@ def __init__( sel_a=self.sel_all_a, sel_r=self.sel_all_r, ) + if len(self.exclude_types): + # exclude types applied to data stat + mask = self.build_type_exclude_mask_mixed( + self.exclude_types, + self.ntypes, + self.sel_a, + self.ndescrpt, + # for data stat, nloc == nall + self.place_holders["type"], + tf.size(self.place_holders["type"]), + self.nei_type_vec_t, # extra input for atten + ) + self.stat_descrpt *= tf.reshape(mask, tf.shape(self.stat_descrpt)) self.sub_sess = tf.Session(graph=sub_graph, config=default_tf_session_config) def compute_input_stats( @@ -640,7 +654,7 @@ def _pass_filter( inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) type_i = -1 if len(self.exclude_types): - mask = self.build_type_exclude_mask( + mask = self.build_type_exclude_mask_mixed( self.exclude_types, self.ntypes, self.sel_a, @@ -1335,9 +1349,9 @@ def init_variables( ) ) - def build_type_exclude_mask( 
+ def build_type_exclude_mask_mixed( self, - exclude_types: List[Tuple[int, int]], + exclude_types: Set[Tuple[int, int]], ntypes: int, sel: List[int], ndescrpt: int, diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index 062fd3c8a6..b603d7098b 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -185,6 +185,18 @@ def __init__( rcut_smth=self.rcut_smth, sel=self.sel_r, ) + if len(self.exclude_types): + # exclude types applied to data stat + mask = self.build_type_exclude_mask( + self.exclude_types, + self.ntypes, + self.sel_r, + self.ndescrpt, + # for data stat, nloc == nall + self.place_holders["type"], + tf.size(self.place_holders["type"]), + ) + self.stat_descrpt *= tf.reshape(mask, tf.shape(self.stat_descrpt)) self.sub_sess = tf.Session( graph=sub_graph, config=default_tf_session_config ) From b563e356f1f3329553fd59982854c8acb5e80589 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 8 Apr 2024 02:49:30 -0400 Subject: [PATCH 04/30] fix(tf): apply exclude types to se_atten_v2 switch (#3651) I construct a test case in which all old implementations fail. --------- Signed-off-by: Jinzhe Zeng (cherry picked from commit 09fd3bb2c423996899ea712e69d8471e0df52844) --- deepmd/descriptor/se_atten.py | 4 + source/tests/test_model_se_atten.py | 130 ++++++++++++++++++++++++++++ 2 files changed, 134 insertions(+) diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index 1dfe138ff7..ced2d08f18 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -672,6 +672,10 @@ def _pass_filter( tf.reshape(self.avg_looked_up, [-1, 1]), [1, self.ndescrpt] ), ) + self.recovered_switch *= tf.reshape( + tf.slice(tf.reshape(mask, [-1, 4]), [0, 0], [-1, 1]), + [-1, natoms[0], self.sel_all_a[0]], + ) else: inputs_i *= mask if nvnmd_cfg.enable and nvnmd_cfg.quantize_descriptor: diff --git a/source/tests/test_model_se_atten.py b/source/tests/test_model_se_atten.py index 5417201a9f..9c71788725 100644 --- a/source/tests/test_model_se_atten.py +++ b/source/tests/test_model_se_atten.py @@ -874,3 +874,133 @@ def test_smoothness_of_stripped_type_embedding_smooth_model(self): np.testing.assert_allclose(de[0], de[1], rtol=0, atol=deltae) np.testing.assert_allclose(df[0], df[1], rtol=0, atol=deltad) np.testing.assert_allclose(dv[0], dv[1], rtol=0, atol=deltad) + + def test_smoothness_of_stripped_type_embedding_smooth_model_excluded_types(self): + """test: auto-diff, continuity of e,f,v.""" + jfile = "water_se_atten.json" + jdata = j_loader(jfile) + + systems = j_must_have(jdata, "systems") + set_pfx = j_must_have(jdata, "set_prefix") + batch_size = 1 + test_size = 1 + rcut = j_must_have(jdata["model"]["descriptor"], "rcut") + + data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt=None) + + test_data = data.get_test() + numb_test = 1 + + jdata["model"]["descriptor"].pop("type", None) + jdata["model"]["descriptor"]["ntypes"] = 2 + jdata["model"]["descriptor"]["stripped_type_embedding"] = True + jdata["model"]["descriptor"]["smooth_type_embdding"] = True + jdata["model"]["descriptor"]["attn_layer"] = 1 + jdata["model"]["descriptor"]["rcut"] = 6.0 + jdata["model"]["descriptor"]["rcut_smth"] = 4.0 + jdata["model"]["descriptor"]["exclude_types"] = [[0, 0], [0, 1]] + jdata["model"]["descriptor"]["set_davg_zero"] = False + descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) + jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() + jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() 
+ jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1() + fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) + typeebd_param = jdata["model"]["type_embedding"] + typeebd = TypeEmbedNet( + ntypes=descrpt.get_ntypes(), + neuron=typeebd_param["neuron"], + activation_function=None, + resnet_dt=typeebd_param["resnet_dt"], + seed=typeebd_param["seed"], + uniform_seed=True, + padding=True, + ) + model = EnerModel(descrpt, fitting, typeebd) + + input_data = { + "coord": [test_data["coord"]], + "box": [test_data["box"]], + "type": [test_data["type"]], + "natoms_vec": [test_data["natoms_vec"]], + "default_mesh": [test_data["default_mesh"]], + } + model._compute_input_stat(input_data) + model.descrpt.bias_atom_e = data.compute_energy_shift() + # make the original implementation failed + model.descrpt.davg[:] += 1e-1 + + t_prop_c = tf.placeholder(tf.float32, [5], name="t_prop_c") + t_energy = tf.placeholder(GLOBAL_ENER_FLOAT_PRECISION, [None], name="t_energy") + t_force = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="t_force") + t_virial = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="t_virial") + t_atom_ener = tf.placeholder( + GLOBAL_TF_FLOAT_PRECISION, [None], name="t_atom_ener" + ) + t_coord = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_coord") + t_type = tf.placeholder(tf.int32, [None], name="i_type") + t_natoms = tf.placeholder(tf.int32, [model.ntypes + 2], name="i_natoms") + t_box = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None, 9], name="i_box") + t_mesh = tf.placeholder(tf.int32, [None], name="i_mesh") + is_training = tf.placeholder(tf.bool) + inputs_dict = {} + + model_pred = model.build( + t_coord, + t_type, + t_natoms, + t_box, + t_mesh, + inputs_dict, + suffix=self.filename + + "-" + + inspect.stack()[0][3] + + "test_model_se_atten_model_compressible_excluded_types", + reuse=False, + ) + energy = model_pred["energy"] + force = model_pred["force"] + virial = model_pred["virial"] + + feed_dict_test = { + t_prop_c: test_data["prop_c"], + t_energy: test_data["energy"][:numb_test], + t_force: np.reshape(test_data["force"][:numb_test, :], [-1]), + t_virial: np.reshape(test_data["virial"][:numb_test, :], [-1]), + t_atom_ener: np.reshape(test_data["atom_ener"][:numb_test, :], [-1]), + t_coord: np.reshape(test_data["coord"][:numb_test, :], [-1]), + t_box: test_data["box"][:numb_test, :], + t_type: np.reshape(test_data["type"][:numb_test, :], [-1]), + t_natoms: test_data["natoms_vec"], + t_mesh: test_data["default_mesh"], + is_training: False, + } + sess = self.cached_session().__enter__() + sess.run(tf.global_variables_initializer()) + [pe, pf, pv] = sess.run([energy, force, virial], feed_dict=feed_dict_test) + pf, pv = pf.reshape(-1), pv.reshape(-1) + + eps = 1e-4 + delta = 1e-6 + fdf, fdv = finite_difference_fv( + sess, energy, feed_dict_test, t_coord, t_box, delta=eps + ) + np.testing.assert_allclose(pf, fdf, delta) + np.testing.assert_allclose(pv, fdv, delta) + + tested_eps = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7] + for eps in tested_eps: + deltae = 1e-15 + deltad = 1e-15 + de, df, dv = check_smooth_efv( + sess, + energy, + force, + virial, + feed_dict_test, + t_coord, + jdata["model"]["descriptor"]["rcut"], + delta=eps, + ) + np.testing.assert_allclose(de[0], de[1], rtol=0, atol=deltae) + np.testing.assert_allclose(df[0], df[1], rtol=0, atol=deltad) + np.testing.assert_allclose(dv[0], dv[1], rtol=0, atol=deltad) From 1f2f6fdab7d06820f2c82eb332a196436bbaab2b Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 8 Apr 
2024 20:29:30 -0400 Subject: [PATCH 05/30] fix: fix git version detection in docker_package_c.sh (#3658) Fix the following error in docker_package_c.sh ``` -- Found Git: /usr/bin/git (found version "2.25.1") fatal: detected dubious ownership in repository at '/root/deepmd-kit' To add an exception for this directory, call: git config --global --add safe.directory /root/deepmd-kit ``` In addition, fetching git tags in actions/checkout. --------- Signed-off-by: Jinzhe Zeng (cherry picked from commit 6eadc293052e896593d80a43b40adc6b5ae19832) --- .github/workflows/package_c.yml | 2 ++ source/install/docker_package_c.sh | 1 + 2 files changed, 3 insertions(+) diff --git a/.github/workflows/package_c.yml b/.github/workflows/package_c.yml index 82567609e4..e932dd0eba 100644 --- a/.github/workflows/package_c.yml +++ b/.github/workflows/package_c.yml @@ -19,6 +19,8 @@ jobs: filename: libdeepmd_c_cu11.tar.gz steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Package C library run: ./source/install/docker_package_c.sh env: diff --git a/source/install/docker_package_c.sh b/source/install/docker_package_c.sh index 544c175a0a..3846daf93b 100755 --- a/source/install/docker_package_c.sh +++ b/source/install/docker_package_c.sh @@ -5,6 +5,7 @@ SCRIPT_PATH=$(dirname $(realpath -s $0)) docker run --rm -v ${SCRIPT_PATH}/../..:/root/deepmd-kit -w /root/deepmd-kit \ tensorflow/build:${TENSORFLOW_BUILD_VERSION:-2.15}-python3.11 \ /bin/sh -c "pip install \"tensorflow${TENSORFLOW_VERSION}\" cmake \ + && git config --global --add safe.directory /root/deepmd-kit \ && cd /root/deepmd-kit/source/install \ && CC=/dt9/usr/bin/gcc \ CXX=/dt9/usr/bin/g++ \ From 2c84b22c01f11eda799ca39f7e492b22bd346c31 Mon Sep 17 00:00:00 2001 From: "A bot of @njzjz" <48687836+njzjz-bot@users.noreply.github.com> Date: Sat, 13 Apr 2024 19:59:31 -0400 Subject: [PATCH 06/30] feat(build): Add Git archives version files (#3669) When downloading the source code of a branch or a release from GitHub or the mirror, there is no Git version information, making setuptools-scm unable to read the version. The alternative way is to add a git archive file supported by setuptools-scm. See: * https://setuptools-scm.readthedocs.io/en/latest/usage/#git-archives (available in setuptools-scm 7.0.0) * https://git-scm.com/docs/git-archive#Documentation/git-archive.txt-export-subst * https://docs.github.com/en/repositories/working-with-files/using-files/downloading-source-code-archives Generated by the task: https://github.com/njzjz-bot/njzjz-bot/issues/4. 
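A minimal sketch of the mechanism (assuming setuptools-scm >= 7.0.0, as the linked docs state): because `.gitattributes` marks the file `export-subst`, `git archive` rewrites the `$Format:...$` placeholders, and setuptools-scm reads the result when no `.git` directory exists:

```sh
# Sketch only: build an archive and inspect the substituted version file.
git archive --format=tar.gz -o deepmd-kit.tar.gz HEAD
tar -xzf deepmd-kit.tar.gz .git_archival.txt
cat .git_archival.txt   # placeholders replaced by commit hash, date, and tags
```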
(cherry picked from commit 3eb3d51e27a2853b68fc89dbc555947584a70a8d) Signed-off-by: Jinzhe Zeng --- .git_archival.txt | 4 ++++ .gitattributes | 2 ++ 2 files changed, 6 insertions(+) create mode 100644 .git_archival.txt diff --git a/.git_archival.txt b/.git_archival.txt new file mode 100644 index 0000000000..8fb235d704 --- /dev/null +++ b/.git_archival.txt @@ -0,0 +1,4 @@ +node: $Format:%H$ +node-date: $Format:%cI$ +describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$ +ref-names: $Format:%D$ diff --git a/.gitattributes b/.gitattributes index e77d446ba6..776405a339 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,4 @@ # do not show up detailed difference on GitHub source/3rdparty/* linguist-generated=true +source/3rdparty/README.md linguist-generated=false +.git_archival.txt export-subst From a6acc34b1500b7cd094ab8d352b3ff7f821f4d24 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 18 Apr 2024 04:02:31 -0400 Subject: [PATCH 07/30] fix(tf): fix foat32 for exclude_types in se_atten_v2 (#3682) Fix type issue in previous PR #3651. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit cac87152eb0010dba3246e24599e9fd8748a039a) --- deepmd/descriptor/se_atten.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index ced2d08f18..b9227916e4 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -673,7 +673,11 @@ def _pass_filter( ), ) self.recovered_switch *= tf.reshape( - tf.slice(tf.reshape(mask, [-1, 4]), [0, 0], [-1, 1]), + tf.slice( + tf.reshape(tf.cast(mask, self.filter_precision), [-1, 4]), + [0, 0], + [-1, 1], + ), [-1, natoms[0], self.sel_all_a[0]], ) else: From cdbb70da21451db83ff3c6b4aacdf330d6ba54ee Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:19:36 +0800 Subject: [PATCH 08/30] Fix typo in `smooth_type_embdding` (#3698) (cherry picked from commit 86b0bf8dfbae9e3a23eb7164f465a64e84ad3213) Signed-off-by: Jinzhe Zeng --- deepmd/descriptor/se_atten.py | 10 ++++++---- deepmd/descriptor/se_atten_v2.py | 2 +- deepmd_utils/utils/argcheck.py | 7 ++++--- doc/model/train-se-atten.md | 2 +- source/tests/test_model_compression_se_atten.py | 14 +++++++------- source/tests/test_model_se_atten.py | 4 ++-- 6 files changed, 21 insertions(+), 18 deletions(-) diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index b9227916e4..a9058cbdef 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -118,7 +118,7 @@ class DescrptSeAtten(DescrptSeA): stripped_type_embedding Whether to strip the type embedding into a separated embedding network. Default value will be True in `se_atten_v2` descriptor. - smooth_type_embdding + smooth_type_embedding When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True. Default value will be True in `se_atten_v2` descriptor. 
@@ -152,10 +152,12 @@ def __init__( attn_mask: bool = False, multi_task: bool = False, stripped_type_embedding: bool = False, - smooth_type_embdding: bool = False, + smooth_type_embedding: bool = False, **kwargs, ) -> None: - if not set_davg_zero and not (stripped_type_embedding and smooth_type_embdding): + if not set_davg_zero and not ( + stripped_type_embedding and smooth_type_embedding + ): warnings.warn( "Set 'set_davg_zero' False in descriptor 'se_atten' " "may cause unexpected incontinuity during model inference!" @@ -188,7 +190,7 @@ def __init__( if ntypes == 0: raise ValueError("`model/type_map` is not set or empty!") self.stripped_type_embedding = stripped_type_embedding - self.smooth = smooth_type_embdding + self.smooth = smooth_type_embedding self.ntypes = ntypes self.att_n = attn self.attn_layer = attn_layer diff --git a/deepmd/descriptor/se_atten_v2.py b/deepmd/descriptor/se_atten_v2.py index 784e02d84d..01c4d93ad8 100644 --- a/deepmd/descriptor/se_atten_v2.py +++ b/deepmd/descriptor/se_atten_v2.py @@ -110,6 +110,6 @@ def __init__( attn_mask=attn_mask, multi_task=multi_task, stripped_type_embedding=True, - smooth_type_embdding=True, + smooth_type_embedding=True, **kwargs, ) diff --git a/deepmd_utils/utils/argcheck.py b/deepmd_utils/utils/argcheck.py index d32d22e359..390742edcc 100644 --- a/deepmd_utils/utils/argcheck.py +++ b/deepmd_utils/utils/argcheck.py @@ -417,7 +417,7 @@ def descrpt_se_atten_common_args(): @descrpt_args_plugin.register("se_atten") def descrpt_se_atten_args(): doc_stripped_type_embedding = "Whether to strip the type embedding into a separated embedding network. Setting it to `False` will fall back to the previous version of `se_atten` which is non-compressible." - doc_smooth_type_embdding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True." + doc_smooth_type_embedding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True." doc_set_davg_zero = "Set the normalization average to zero. 
This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used" return [ @@ -430,11 +430,12 @@ def descrpt_se_atten_args(): doc=doc_stripped_type_embedding, ), Argument( - "smooth_type_embdding", + "smooth_type_embedding", bool, optional=True, default=False, - doc=doc_smooth_type_embdding, + alias=["smooth_type_embdding"], + doc=doc_smooth_type_embedding, ), Argument( "set_davg_zero", bool, optional=True, default=True, doc=doc_set_davg_zero diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index 5a47170958..79c7149a61 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -114,7 +114,7 @@ We highly recommend using the version 2.0 of the attention-based descriptor `"se ```json "stripped_type_embedding": true, - "smooth_type_embdding": true, + "smooth_type_embedding": true, "set_davg_zero": false ``` diff --git a/source/tests/test_model_compression_se_atten.py b/source/tests/test_model_compression_se_atten.py index 0752635a98..5f46f5aa86 100644 --- a/source/tests/test_model_compression_se_atten.py +++ b/source/tests/test_model_compression_se_atten.py @@ -37,27 +37,27 @@ def _file_delete(file): { "se_atten precision": "float64", "type embedding precision": "float64", - "smooth_type_embdding": True, + "smooth_type_embedding": True, }, { "se_atten precision": "float64", "type embedding precision": "float64", - "smooth_type_embdding": False, + "smooth_type_embedding": False, }, { "se_atten precision": "float64", "type embedding precision": "float32", - "smooth_type_embdding": True, + "smooth_type_embedding": True, }, { "se_atten precision": "float32", "type embedding precision": "float64", - "smooth_type_embdding": True, + "smooth_type_embedding": True, }, { "se_atten precision": "float32", "type embedding precision": "float32", - "smooth_type_embdding": True, + "smooth_type_embedding": True, }, ] @@ -82,8 +82,8 @@ def _init_models(): jdata["model"]["descriptor"]["stripped_type_embedding"] = True jdata["model"]["descriptor"]["sel"] = 120 jdata["model"]["descriptor"]["attn_layer"] = 0 - jdata["model"]["descriptor"]["smooth_type_embdding"] = tests[i][ - "smooth_type_embdding" + jdata["model"]["descriptor"]["smooth_type_embedding"] = tests[i][ + "smooth_type_embedding" ] jdata["model"]["type_embedding"] = {} jdata["model"]["type_embedding"]["precision"] = tests[i][ diff --git a/source/tests/test_model_se_atten.py b/source/tests/test_model_se_atten.py index 9c71788725..1b9dc6c1c9 100644 --- a/source/tests/test_model_se_atten.py +++ b/source/tests/test_model_se_atten.py @@ -751,7 +751,7 @@ def test_smoothness_of_stripped_type_embedding_smooth_model(self): jdata["model"]["descriptor"].pop("type", None) jdata["model"]["descriptor"]["ntypes"] = 2 jdata["model"]["descriptor"]["stripped_type_embedding"] = True - jdata["model"]["descriptor"]["smooth_type_embdding"] = True + jdata["model"]["descriptor"]["smooth_type_embedding"] = True jdata["model"]["descriptor"]["attn_layer"] = 1 jdata["model"]["descriptor"]["rcut"] = 6.0 jdata["model"]["descriptor"]["rcut_smth"] = 4.0 @@ -894,7 +894,7 @@ def test_smoothness_of_stripped_type_embedding_smooth_model_excluded_types(self) jdata["model"]["descriptor"].pop("type", None) jdata["model"]["descriptor"]["ntypes"] = 2 jdata["model"]["descriptor"]["stripped_type_embedding"] = True - jdata["model"]["descriptor"]["smooth_type_embdding"] = True + jdata["model"]["descriptor"]["smooth_type_embedding"] = True jdata["model"]["descriptor"]["attn_layer"] = 1 jdata["model"]["descriptor"]["rcut"] = 6.0 
jdata["model"]["descriptor"]["rcut_smth"] = 4.0 From b3b6a4e312bac1326ec3d950c6f832bca357bbe5 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 27 Apr 2024 10:47:38 -0400 Subject: [PATCH 09/30] build(deps): bump tar from 6.1.14 to 6.2.1 in /source/nodejs (#3714) Closes #3665. Bumps [tar](https://github.com/isaacs/node-tar) from 6.1.14 to 6.2.1. - [Release notes](https://github.com/isaacs/node-tar/releases) - [Changelog](https://github.com/isaacs/node-tar/blob/main/CHANGELOG.md) - [Commits](https://github.com/isaacs/node-tar/compare/v6.1.14...v6.2.1) --- updated-dependencies: - dependency-name: tar dependency-type: indirect ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> (cherry picked from commit 981ce449cf5346f42cbd2f36461d0b801e7de177) --- source/nodejs/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/nodejs/yarn.lock b/source/nodejs/yarn.lock index fb0c093f11..be1be837d6 100644 --- a/source/nodejs/yarn.lock +++ b/source/nodejs/yarn.lock @@ -619,9 +619,9 @@ strip-ansi@^6.0.1: ansi-regex "^5.0.1" tar@^6.1.11, tar@^6.1.2: - version "6.1.14" - resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.14.tgz#e87926bec1cfe7c9e783a77a79f3e81c1cfa3b66" - integrity sha512-piERznXu0U7/pW7cdSn7hjqySIVTYT6F76icmFk7ptU7dDYlXTm5r9A6K04R2vU3olYgoKeo1Cg3eeu5nhftAw== + version "6.2.1" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a" + integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A== dependencies: chownr "^2.0.0" fs-minipass "^2.0.0" From 3407e82707e5b1c44ffbfa1deb5eb7504159e7e0 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 30 Apr 2024 02:31:05 -0400 Subject: [PATCH 10/30] build: unpin tensorflow version on windows (#3721) https://github.com/tensorflow/tensorflow/issues/61830 got fixed. ## Summary by CodeRabbit - **Bug Fixes** - Updated the TensorFlow version constraint for better compatibility on Windows systems. Signed-off-by: Jinzhe Zeng (cherry picked from commit 8fb7e91cdc33a0b61e2d9d72b6980cafaa2f7754) --- backend/find_tensorflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/find_tensorflow.py b/backend/find_tensorflow.py index a4edfa313d..d85c3a6a18 100644 --- a/backend/find_tensorflow.py +++ b/backend/find_tensorflow.py @@ -149,7 +149,7 @@ def get_tf_requirement(tf_version: str = "") -> dict: "tensorflow-cpu; platform_machine!='aarch64' and (platform_machine!='arm64' or platform_system != 'Darwin')", "tensorflow; platform_machine=='aarch64' or (platform_machine=='arm64' and platform_system == 'Darwin')", # https://github.com/tensorflow/tensorflow/issues/61830 - "tensorflow-cpu<2.15; platform_system=='Windows'", + "tensorflow-cpu!=2.15.*; platform_system=='Windows'", *extra_requires, ], "gpu": [ From 5510794490bdb71ac1a4efb4e74e189f347b6d13 Mon Sep 17 00:00:00 2001 From: nahso <47053538+nahso@users.noreply.github.com> Date: Tue, 30 Apr 2024 15:01:24 +0800 Subject: [PATCH 11/30] test: set more lossy precision requirements (#3726) ## Summary by CodeRabbit - **Tests** - Updated test cases to specify precision digits directly, enhancing the accuracy of model compression tests. 
(cherry picked from commit ee47e75748e70ac979ed54379b316e2465bcaf78) --- .../tests/test_model_compression_se_atten.py | 42 +++++++++---------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/source/tests/test_model_compression_se_atten.py b/source/tests/test_model_compression_se_atten.py index 5f46f5aa86..9c245d8900 100644 --- a/source/tests/test_model_compression_se_atten.py +++ b/source/tests/test_model_compression_se_atten.py @@ -28,36 +28,36 @@ def _file_delete(file): os.remove(file) -# 4 tests: -# - type embedding FP64, se_atten FP64 -# - type embedding FP64, se_atten FP32 -# - type embedding FP32, se_atten FP64 -# - type embedding FP32, se_atten FP32 tests = [ { "se_atten precision": "float64", "type embedding precision": "float64", "smooth_type_embedding": True, + "precision_digit": 10, }, { "se_atten precision": "float64", "type embedding precision": "float64", "smooth_type_embedding": False, + "precision_digit": 10, }, { "se_atten precision": "float64", "type embedding precision": "float32", "smooth_type_embedding": True, + "precision_digit": 2, }, { "se_atten precision": "float32", "type embedding precision": "float64", "smooth_type_embedding": True, + "precision_digit": 2, }, { "se_atten precision": "float32", "type embedding precision": "float32", "smooth_type_embedding": True, + "precision_digit": 2, }, ] @@ -158,10 +158,6 @@ def _init_models_exclude_types(): INPUTS_ET, FROZEN_MODELS_ET, COMPRESSED_MODELS_ET = _init_models_exclude_types() -def _get_default_places(nth_test): - return 10 if nth_test == 0 else 3 - - @unittest.skipIf( parse_version(tf.__version__) < parse_version("2"), f"The current tf version {tf.__version__} is too low to run the new testing model.", @@ -200,7 +196,7 @@ def test_attrs(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] self.assertEqual(dp_original.get_ntypes(), 2) self.assertAlmostEqual(dp_original.get_rcut(), 6.0, places=default_places) @@ -218,7 +214,7 @@ def test_1frame(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0 = dp_original.eval( self.coords, self.box, self.atype, atomic=False @@ -244,7 +240,7 @@ def test_1frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0, ae0, av0 = dp_original.eval( self.coords, self.box, self.atype, atomic=True @@ -276,7 +272,7 @@ def test_2frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] coords2 = np.concatenate((self.coords, self.coords)) box2 = np.concatenate((self.box, self.box)) @@ -346,7 +342,7 @@ def test_1frame(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0 = dp_original.eval( self.coords, self.box, self.atype, atomic=False @@ -372,7 +368,7 @@ def test_1frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = 
_get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0, ae0, av0 = dp_original.eval( self.coords, self.box, self.atype, atomic=True @@ -404,7 +400,7 @@ def test_2frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] coords2 = np.concatenate((self.coords, self.coords)) ee0, ff0, vv0, ae0, av0 = dp_original.eval( @@ -473,7 +469,7 @@ def test_1frame(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0 = dp_original.eval( self.coords, self.box, self.atype, atomic=False @@ -505,7 +501,7 @@ def test_1frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0, ae0, av0 = dp_original.eval( self.coords, self.box, self.atype, atomic=True @@ -535,7 +531,7 @@ def test_1frame_atm(self): def test_ase(self): for i in range(len(tests)): - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] from ase import ( Atoms, ) @@ -628,7 +624,7 @@ def test_attrs(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] self.assertEqual(dp_original.get_ntypes(), 2) self.assertAlmostEqual(dp_original.get_rcut(), 6.0, places=default_places) @@ -646,7 +642,7 @@ def test_1frame(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0 = dp_original.eval( self.coords, self.box, self.atype, atomic=False @@ -672,7 +668,7 @@ def test_1frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] ee0, ff0, vv0, ae0, av0 = dp_original.eval( self.coords, self.box, self.atype, atomic=True @@ -704,7 +700,7 @@ def test_2frame_atm(self): for i in range(len(tests)): dp_original = self.dp_originals[i] dp_compressed = self.dp_compresseds[i] - default_places = _get_default_places(i) + default_places = tests[i]["precision_digit"] coords2 = np.concatenate((self.coords, self.coords)) box2 = np.concatenate((self.box, self.box)) From 776a0bedfb166fa4c6e4f3a5a3888a8a86daec34 Mon Sep 17 00:00:00 2001 From: nahso <47053538+nahso@users.noreply.github.com> Date: Mon, 6 May 2024 04:28:21 +0800 Subject: [PATCH 12/30] Clarifiy se_atten_v2 compression doc (#3727) https://github.com/deepmodeling/deepmd-kit/issues/3643 - **Documentation** - Simplified the description for the number of attention layers in the code documentation. - Added a notice about model compression compatibility for `se_atten_v2` descriptor in the documentation. 
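For concreteness, a hypothetical descriptor section that stays compressible under this rule pins the attention depth to zero; every value besides `type` and `attn_layer` below is an illustrative placeholder, not a recommendation:

```json
{
  "type": "se_atten_v2",
  "rcut": 6.0,
  "rcut_smth": 0.5,
  "sel": 120,
  "attn_layer": 0
}
```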
--------- Signed-off-by: Jinzhe Zeng Co-authored-by: Jinzhe Zeng (cherry picked from commit 62832e85d3c370c2ce4c66c559ee88094a544198) Signed-off-by: Jinzhe Zeng --- deepmd_utils/utils/argcheck.py | 2 +- doc/freeze/compress.md | 2 ++ doc/model/train-se-atten.md | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/deepmd_utils/utils/argcheck.py b/deepmd_utils/utils/argcheck.py index 390742edcc..29886dd54f 100644 --- a/deepmd_utils/utils/argcheck.py +++ b/deepmd_utils/utils/argcheck.py @@ -365,7 +365,7 @@ def descrpt_se_atten_common_args(): doc_seed = "Random seed for parameter initialization" doc_exclude_types = "The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1." doc_attn = "The length of hidden vectors in attention layers" - doc_attn_layer = "The number of attention layers. Note that model compression of `se_atten` is only enabled when attn_layer==0 and stripped_type_embedding is True" + doc_attn_layer = "The number of attention layers." doc_attn_dotr = "Whether to do dot product with the normalized relative coordinates" doc_attn_mask = "Whether to do mask on the diagonal in the attention matrix" diff --git a/doc/freeze/compress.md b/doc/freeze/compress.md index 54b9baf7ca..87d4699d2b 100644 --- a/doc/freeze/compress.md +++ b/doc/freeze/compress.md @@ -148,6 +148,8 @@ The model compression interface requires the version of DeePMD-kit used in the o Descriptors with `se_e2_a`, `se_e3`, `se_e2_r` and `se_atten_v2` types are supported by the model compression feature. `Hybrid` mixed with the above descriptors is also supported. +Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. + **Available activation functions for descriptor:** - tanh diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index 79c7149a61..0ed73fe203 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -120,6 +120,8 @@ We highly recommend using the version 2.0 of the attention-based descriptor `"se Practical evidence demonstrates that `"se_atten_v2"` offers better and more stable performance compared to `"se_atten"`. +Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. + ### Fitting `"ener"` DPA-1 only supports `"ener"` fitting type, and you can refer [here](train-energy.md) for detailed information. From 96139dc3a90001c2237d989501fdb8353f06e4a4 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 17 May 2024 23:14:43 -0400 Subject: [PATCH 13/30] style: enable W rules (#3793) Fix #3789. - **Documentation** - Corrected a minor formatting issue in the quick start guide for displaying a file path. - **Chores** - Added a new linting rule for warnings to improve code quality. - **Tests** - Improved regular expression handling in test cases by using raw string notation. 
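As a hedged illustration of the raw-string change mentioned above (not code from this patch): ruff's W605 flags string literals whose backslash escapes are invalid Python, and the idiomatic fix is a raw string:

```python
import re

# W605 flags "\d" as an invalid escape sequence in a normal string literal;
# a raw string leaves the backslash for the regex engine to interpret.
pattern = re.compile(r"\d+")   # instead of re.compile("\d+")
print(pattern.findall("abc 123 def 45"))  # ['123', '45']
```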
--------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit 81b5949e16a2c62dd733154673af418713d6803d) Signed-off-by: Jinzhe Zeng --- doc/getting-started/quick_start.ipynb | 2 +- pyproject.toml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/getting-started/quick_start.ipynb b/doc/getting-started/quick_start.ipynb index 67674c4654..67462c91d4 100644 --- a/doc/getting-started/quick_start.ipynb +++ b/doc/getting-started/quick_start.ipynb @@ -396,7 +396,7 @@ } ], "source": [ - "! cat DeePMD-kit_Tutorial/00.data/training_data/type.raw " + "! cat DeePMD-kit_Tutorial/00.data/training_data/type.raw" ] }, { diff --git a/pyproject.toml b/pyproject.toml index 1b4d459a7d..9eb1b71ac1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -287,6 +287,7 @@ force_grid_wrap = 1 [tool.ruff.lint] select = [ "E", # errors + "W", # warning "F", # pyflakes "D", # pydocstyle "UP", # pyupgrade From afb059cc2d705df724ca586f44f7094df2cbadd4 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 23 May 2024 16:12:17 -0400 Subject: [PATCH 14/30] lmp: improve error message when compute/fix is not found (#3801) ## Summary by CodeRabbit - **Bug Fixes** - Enhanced error handling to ensure required computes or fixes are present before proceeding with operations, improving robustness and reliability. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> (cherry picked from commit 7b16911ddf9498d2384a0b68dc6f7505e1a7d353) --- source/lmp/pair_deepmd.cpp | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 90aa453143..1a64fdb869 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -218,7 +218,9 @@ void PairDeepMD::make_fparam_from_compute(vector &fparam) { int icompute = modify->find_compute(compute_fparam_id); Compute *compute = modify->compute[icompute]; - assert(compute); + if (!compute) { + error->all(FLERR, "compute id is not found: " + compute_fparam_id); + } fparam.resize(dim_fparam); if (dim_fparam == 1) { @@ -245,7 +247,9 @@ void PairDeepMD::make_aparam_from_compute(vector &aparam) { int icompute = modify->find_compute(compute_aparam_id); Compute *compute = modify->compute[icompute]; - assert(compute); + if (!compute) { + error->all(FLERR, "compute id is not found: " + compute_aparam_id); + } int nlocal = atom->nlocal; aparam.resize(static_cast(dim_aparam) * nlocal); @@ -276,7 +280,9 @@ void PairDeepMD::make_ttm_fparam(vector &fparam) { ttm_fix = dynamic_cast(modify->fix[ii]); } } - assert(ttm_fix); + if (!ttm_fix) { + error->all(FLERR, "fix ttm id is not found: " + ttm_fix_id); + } fparam.resize(dim_fparam); @@ -315,7 +321,9 @@ void PairDeepMD::make_ttm_aparam(vector &daparam) { ttm_fix = dynamic_cast(modify->fix[ii]); } } - assert(ttm_fix); + if (!ttm_fix) { + error->all(FLERR, "fix ttm id is not found: " + ttm_fix_id); + } // modify double **x = atom->x; int *mask = atom->mask; From f24c025504b9e2e2d89f79cf5773b55b9ef4033a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yifan=20Li=E6=9D=8E=E4=B8=80=E5=B8=86?= Date: Sun, 26 May 2024 21:02:00 -0400 Subject: [PATCH 15/30] Add a reminder for the illegal memory error (#3822) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using the GPU version of the neighbor 
stat code, one may encounter the following issue and the training will stop: ``` [2024-05-24 23:00:42,027] DEEPMD INFO Adjust batch size from 1024 to 2048 [2024-05-24 23:00:42,139] DEEPMD INFO Adjust batch size from 2048 to 4096 [2024-05-24 23:00:42,285] DEEPMD INFO Adjust batch size from 4096 to 8192 [2024-05-24 23:00:42,628] DEEPMD INFO Adjust batch size from 8192 to 16384 [2024-05-24 23:00:43,180] DEEPMD INFO Adjust batch size from 16384 to 32768 [2024-05-24 23:00:44,341] DEEPMD INFO Adjust batch size from 32768 to 65536 [2024-05-24 23:00:46,713] DEEPMD INFO Adjust batch size from 65536 to 131072 2024-05-24 23:00:52.071120: E tensorflow/compiler/xla/stream_executor/cuda/cuda_event.cc:29] Error polling for event status: failed to query event: CUDA_ERROR_ILLEGAL_ADDRESS: an illegal memory access was encountered 2024-05-24 23:00:52.075435: F tensorflow/core/common_runtime/device/device_event_mgr.cc:223] Unexpected Event status: 1 /bin/sh: line 1: 1397100 Aborted ``` This should be due to some issue of TensorFlow. One may use the environment variable `DP_INFER_BATCH_SIZE` to avoid this issue. This PR remind the user to set a small `DP_INFER_BATCH_SIZE` to avoid this issue. - **Bug Fixes** - Added a log message to guide users on setting the `DP_INFER_BATCH_SIZE` environment variable to avoid TensorFlow illegal memory access issues. --------- Signed-off-by: Yifan Li李一帆 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> (cherry picked from commit d754672955aa683310a761b8e1292e6192ea116f) Signed-off-by: Jinzhe Zeng --- deepmd_utils/utils/batch_size.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/deepmd_utils/utils/batch_size.py b/deepmd_utils/utils/batch_size.py index 1b93a51242..8a60cc69fb 100644 --- a/deepmd_utils/utils/batch_size.py +++ b/deepmd_utils/utils/batch_size.py @@ -62,6 +62,11 @@ def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None: self.maximum_working_batch_size = initial_batch_size if self.is_gpu_available(): self.minimal_not_working_batch_size = 2**31 + log.info( + "If you encounter the error 'an illegal memory access was encountered', this may be due to a TensorFlow issue. " + "To avoid this, set the environment variable DP_INFER_BATCH_SIZE to a smaller value than the last adjusted batch size. " + "The environment variable DP_INFER_BATCH_SIZE controls the inference batch size (nframes * natoms). " + ) else: self.minimal_not_working_batch_size = ( self.maximum_working_batch_size + 1 From b53fd5a058333cdd984759b53d6553c5b5234822 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Wed, 29 May 2024 09:43:20 -0400 Subject: [PATCH 16/30] fix: fix ipi package (#3835) The original package was yanked. xref: https://github.com/i-pi/i-pi/pull/290 ## Summary by CodeRabbit - **Chores** - Updated the dependency name from `"i-PI"` to `"ipi"` in the project configuration file for consistency. 
Signed-off-by: Jinzhe Zeng (cherry picked from commit 0afe8bf7c7c6b0fdb0f0bfbec14d13db7d3c3405) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9eb1b71ac1..373b763146 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,7 @@ lmp = [ "lammps~=2023.8.2.3.0", ] ipi = [ - "i-PI", + "ipi", ] gui = [ "dpgui", From df4d2f0101c8b51cc5f391b8acb2dbfe10169a09 Mon Sep 17 00:00:00 2001 From: Chenqqian Zhang <100290172+Chengqian-Zhang@users.noreply.github.com> Date: Tue, 2 Jul 2024 03:59:48 -0400 Subject: [PATCH 17/30] docs: add document equations for `se_atten_v2` (#3828) Solve issue #3139 `"se_atten_v2"` is inherited from `"se_atten"` with the following parameter modifications: ```json "tebd_input_mode": "strip", "smooth_type_embedding": true, "set_davg_zero": false ``` I add the equations for parameter `"tebd_input_mode"`. ## Summary by CodeRabbit - **Documentation** - Detailed the default value and functionality of the `"tebd_input_mode"` parameter. - Highlighted the performance superiority of `"se_atten_v2"` over `"se_atten"`. - Specified a model compression requirement for `se_atten_v2`. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> (cherry picked from commit e3acea5e87fab027fbc347beaf7a234b351ee9d3) Signed-off-by: Jinzhe Zeng --- doc/model/train-se-atten.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index 0ed73fe203..fef910b0a7 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -17,7 +17,11 @@ Attention-based descriptor $\mathcal{D}^i \in \mathbb{R}^{M \times M_{<}}$, whic ``` where $\hat{\mathcal{G}}^i$ represents the embedding matrix $\mathcal{G}^i$ after additional self-attention mechanism and $\mathcal{R}^i$ is defined by the full case in the [`se_e2_a`](./train-se-e2-a.md). -Note that we obtain $\mathcal{G}^i$ using the type embedding method by default in this descriptor. +Note that we obtain $\mathcal{G}^i$ using the type embedding method by default in this descriptor. By default, we concat $s(r_{ij})$ and the type embeddings of central and neighboring atoms $\mathcal{A}^i$ and $\mathcal{A}^j$ as input of the embedding network $\mathcal{N}_{e,2}$: + +```math + (\mathcal{G}^i)_j = \mathcal{N}_{e,2}(\{s(r_{ij}), \mathcal{A}^i, \mathcal{A}^j\}) \quad \mathrm{or}\quad(\mathcal{G}^i)_j = \mathcal{N}_{e,2}(\{s(r_{ij}), \mathcal{A}^j\}) +``` To perform the self-attention mechanism, the queries $\mathcal{Q}^{i,l} \in \mathbb{R}^{N_c\times d_k}$, keys $\mathcal{K}^{i,l} \in \mathbb{R}^{N_c\times d_k}$, and values $\mathcal{V}^{i,l} \in \mathbb{R}^{N_c\times d_v}$ are first obtained: @@ -118,6 +122,12 @@ We highly recommend using the version 2.0 of the attention-based descriptor `"se "set_davg_zero": false ``` +You need to use descriptor `"se_atten_v2"` and do not need to set `stripped_type_embedding` and `smooth_type_embedding` because the default value of `stripped_type_embedding` is `true`, and the default value of `smooth_type_embedding` is `true`. 
When `stripped_type_embedding` is set to `true`, the embedding matrix $\mathcal{G}^i$ is constructed as: + +```math + (\mathcal{G}^i)_j = \mathcal{N}_{e,2}(s(r_{ij})) + \mathcal{N}_{e,2}(s(r_{ij})) \odot ({N}_{e,2}(\{\mathcal{A}^i, \mathcal{A}^j\}) \odot s(r_{ij})) \quad \mathrm{or} +``` + Practical evidence demonstrates that `"se_atten_v2"` offers better and more stable performance compared to `"se_atten"`. Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. From c39dfe606fb1d5721e871acd731bd7218797d0fd Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 2 Jul 2024 00:30:41 -0400 Subject: [PATCH 18/30] fix(tf): prevent `fitting_attr` variable scope from becoming `fitting_attr_1` (#3930) Fix #3928. Prevent `fitting_attr` from becoming `fitting_attr_1`. ## Summary by CodeRabbit - **Refactor** - Improved TensorFlow variable scope management by switching to `tf.AUTO_REUSE` to streamline code and reduce the likelihood of variable reuse conflicts. --------- Signed-off-by: Jinzhe Zeng (cherry picked from commit e809e64748c2506b25b11ebb52f2bbbb59bc883e) --- deepmd/descriptor/se_a_mask.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deepmd/descriptor/se_a_mask.py b/deepmd/descriptor/se_a_mask.py index d573755ea5..c5770721f6 100644 --- a/deepmd/descriptor/se_a_mask.py +++ b/deepmd/descriptor/se_a_mask.py @@ -307,8 +307,9 @@ def build( aparam[:, :] is the real/virtual sign for each atom. """ aparam = input_dict["aparam"] - with tf.variable_scope("fitting_attr" + suffix, reuse=reuse): - t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool) + t_aparam_nall = tf.constant( + True, name=f"fitting_attr{suffix}/aparam_nall", dtype=tf.bool + ) self.mask = tf.cast(aparam, tf.int32) self.mask = tf.reshape(self.mask, [-1, natoms[1]]) From 9feecdc834b2ea3d812d92e2dc46a54d47fae2ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 21:53:38 +0000 Subject: [PATCH 19/30] build(deps): bump pypa/cibuildwheel from 2.17 to 2.18 (#3777) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.17 to 2.18.
Release notes

Sourced from pypa/cibuildwheel's releases.

v2.18.0

  • ✨ Adds CPython 3.13 support, under the prerelease flag CIBW_PRERELEASE_PYTHONS. This version of cibuildwheel uses 3.13.0b1. Free-threading mode is not available yet, waiting on official binaries (planned for beta 2) and pip support. While CPython is in beta, the ABI can change, so your wheels might not be compatible with the final release. For this reason, we don't recommend distributing wheels until RC1, at which point 3.13 will be available in cibuildwheel without the flag. (#1816)
  • ✨ Musllinux now defaults to musllinux_1_2. You can set the older musllinux_1_1 via config if needed. (#1817)
  • 🛠 No longer pre-seed setuptools/wheel in virtual environments (#1819)
  • 🛠 Respect the constraints file when building with pip, matching build (#1818)
  • 🛠 Use uv to compile our pinned dependencies, 10x faster and doesn't require special setup (#1778)
  • 🐛 Fix an issue with the schema (#1788)
  • 📚 Document the new delocate error checking macOS versions (#1766)
  • 📚 Document Rust builds (#1816)
  • 📚 Speed up our readthedocs builds with uv, 26 seconds -> 6 seconds to install dependencies (#1816)
Changelog

Sourced from pypa/cibuildwheel's changelog.



Changelog

v2.18.0

12 May 2024

  • ✨ Adds CPython 3.13 support, under the prerelease flag CIBW_PRERELEASE_PYTHONS. This version of cibuildwheel uses 3.13.0b1. Free-threading mode is not available yet, waiting on official binaries (planned for beta 2) and pip support.

    While CPython is in beta, the ABI can change, so your wheels might not be compatible with the final release. For this reason, we don't recommend distributing wheels until RC1, at which point 3.13 will be available in cibuildwheel without the flag. (#1816)

  • ✨ Musllinux now defaults to musllinux_1_2. You can set the older musllinux_1_1 via config if needed. (#1817)

  • 🛠 No longer pre-seed setuptools/wheel in virtual environments (#1819)

  • 🛠 Respect the constraints file when building with pip, matching build (#1818)

  • 🛠 Use uv to compile our pinned dependencies, 10x faster and doesn't require special setup (#1778)

  • 🐛 Fix an issue with the schema (#1788)

  • 📚 Document the new delocate error checking macOS versions (#1766)

  • 📚 Document Rust builds (#1816)

  • 📚 Speed up our readthedocs builds with uv, 26 seconds -> 6 seconds to install dependencies (#1816)

v2.17.0

11 March 2024

  • 🌟 Adds the ability to inherit configuration in TOML overrides. This makes certain configurations much simpler. If you're overriding an option like before-build or environment, and you just want to add an extra command or environment variable, you can just append (or prepend) to the previous config. See the docs for more information. (#1730)
  • 🌟 Adds official support for native arm64 macOS GitHub runners. To use them, just specify macos-14 as an os of your job in your workflow file. You can also keep macos-13 in your build matrix to build x86_64. Check out the new GitHub Actions example config.
  • ✨ You no longer need to specify --platform to run cibuildwheel locally! Instead it will detect your platform automatically. This was a safety feature, no longer necessary. (#1727)
  • 🛠 Removed setuptools and wheel pinned versions. This only affects old-style projects without a pyproject.toml, projects with pyproject.toml are already getting fresh versions of their build-system.requires installed into an isolated environment. (#1725)
  • 🛠 Improve how the GitHub Action passes arguments (#1757)
  • 🛠 Remove a system-wide install of pipx in the GitHub Action (#1745)
  • 🐛 No longer will cibuildwheel override the PIP_CONSTRAINT environment variable when using the build frontend. Instead it will be extended. (#1675)
  • 🐛 Fix a bug where building and testing both x86_64 and arm64 wheels on the same runner caused the wrong architectures in the test environment (#1750)
  • 🐛 Fix a bug that prevented testing a CPython 3.8 wheel targeting macOS 11+ on x86_64 (#1768)
  • 📚 Moved the docs onto the official PyPA domain - they're now available at https://cibuildwheel.pypa.io . (#1775)
  • 📚 Docs and examples improvements (#1762, #1734)

v2.16.5

30 January 2024

  • 🐛 Fix an incompatibility with the GitHub Action and new GitHub Runner images for Windows that bundle Powershell 7.3+ (#1741)
  • 🛠 Preliminary support for new macos-14 arm64 runners (#1743)

v2.16.4

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pypa/cibuildwheel&package-manager=github_actions&previous-version=2.17&new-version=2.18)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> (cherry picked from commit 33f00c8ff1e73986c0e7fae13d0facf842fd3667) --- .github/workflows/build_wheel.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index dd6b40c0b9..eb8dbb83a2 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -81,7 +81,7 @@ jobs: rm -rf .git if: matrix.dp_pkg_name == 'deepmd-kit-cu11' - name: Build wheels - uses: pypa/cibuildwheel@v2.17 + uses: pypa/cibuildwheel@v2.18 env: CIBW_BUILD_VERBOSITY: 1 CIBW_ARCHS: all From 95a7dd8d27cf55a3cdf39afe6920a6a3198bee63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 20:40:36 -0400 Subject: [PATCH 20/30] build(deps): bump docker/build-push-action from 5 to 6 (#3882) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6.
Release notes

Sourced from docker/build-push-action's releases.

v6.0.0

[!NOTE] This major release adds support for generating Build summary and exporting build record for your build. You can disable this feature by setting DOCKER_BUILD_NO_SUMMARY: true environment variable in your workflow.

Full Changelog: https://github.com/docker/build-push-action/compare/v5.4.0...v6.0.0

v5.4.0

Full Changelog: https://github.com/docker/build-push-action/compare/v5.3.0...v5.4.0

v5.3.0

Full Changelog: https://github.com/docker/build-push-action/compare/v5.2.0...v5.3.0

v5.2.0

Full Changelog: https://github.com/docker/build-push-action/compare/v5.1.0...v5.2.0

v5.1.0

Full Changelog: https://github.com/docker/build-push-action/compare/v5.0.0...v5.1.0

Commits
  • c382f71 Merge pull request #1120 from crazy-max/build-summary
  • 5a5b70d chore: update generated content
  • dc24cf9 don't generate summary for cloud driver
  • 667cb22 DOCKER_BUILD_NO_SUMMARY env to disable summary
  • d880b19 generate build summary
  • e51051a export build record and upload artifact
  • 86c2bd0 Merge pull request #1137 from docker/dependabot/npm_and_yarn/braces-3.0.3
  • 268d2b1 Merge pull request #1138 from docker/dependabot/npm_and_yarn/docker/actions-t...
  • 2b8dc7f chore: update generated content
  • 840c12b chore(deps): Bump @​docker/actions-toolkit from 0.25.1 to 0.26.0
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=5&new-version=6)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Signed-off-by: Jinzhe Zeng Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng (cherry picked from commit e2b659a4634286fc9e09fe8decc15555726216f9) --- .github/workflows/build_wheel.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index eb8dbb83a2..273fe7dccd 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -161,7 +161,7 @@ jobs: images: ghcr.io/deepmodeling/deepmd-kit - name: Build and push Docker image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: source/install/docker push: ${{ github.repository_owner == 'deepmodeling' && github.event_name == 'push' && github.actor != 'dependabot[bot]' }} From 02f90a49a2f9252fd06e5fffc572ece2f74ea449 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 2 Jul 2024 04:07:20 -0400 Subject: [PATCH 21/30] sync .pre-commit-config.yaml with devel Signed-off-by: Jinzhe Zeng --- .pre-commit-config.yaml | 25 +- CITATIONS.bib | 611 +++++++++++++---------- backend/read_env.py | 2 +- data/raw/copy_raw.py | 2 +- deepmd/cluster/local.py | 2 +- deepmd/descriptor/descriptor.py | 12 +- deepmd/descriptor/loc_frame.py | 8 +- deepmd/descriptor/se.py | 8 +- deepmd/descriptor/se_a.py | 12 +- deepmd/descriptor/se_atten.py | 8 +- deepmd/descriptor/se_r.py | 8 +- deepmd/descriptor/se_t.py | 8 +- deepmd/entrypoints/compress.py | 11 +- deepmd/entrypoints/freeze.py | 4 +- deepmd/entrypoints/neighbor_stat.py | 4 +- deepmd/entrypoints/test.py | 11 +- deepmd/fit/dos.py | 10 +- deepmd/fit/ener.py | 10 +- deepmd/fit/fitting.py | 3 +- deepmd/infer/deep_pot.py | 8 +- deepmd/model/dos.py | 2 +- deepmd/model/ener.py | 2 +- deepmd/model/model.py | 6 +- deepmd/model/tensor.py | 2 +- deepmd/nvnmd/entrypoints/train.py | 2 +- deepmd/nvnmd/entrypoints/wrap.py | 2 +- deepmd/nvnmd/utils/fio.py | 2 +- deepmd/train/trainer.py | 21 +- deepmd/utils/convert.py | 3 +- deepmd/utils/finetune.py | 3 +- deepmd/utils/multi_init.py | 3 +- deepmd/utils/neighbor_stat.py | 7 +- deepmd_utils/entrypoints/doc.py | 2 +- deepmd_utils/env.py | 4 +- deepmd_utils/main.py | 2 +- deepmd_utils/utils/data.py | 4 +- deepmd_utils/utils/data_system.py | 2 +- deepmd_utils/utils/path.py | 2 +- doc/conf.py | 2 +- source/api_c/include/deepmd.hpp | 12 +- source/api_c/tests/test_deeppot_a_hpp.cc | 2 +- source/api_cc/include/DataModifier.h | 4 +- source/api_cc/include/DeepPot.h | 4 +- source/api_cc/include/DeepTensor.h | 4 +- source/api_cc/include/common.h | 4 +- source/api_cc/src/DataModifier.cc | 2 +- source/api_cc/tests/test_deeppot_a.cc | 2 +- source/api_cc/tests/test_deeppot_r.cc | 2 +- source/install/build_tf.py | 9 +- source/lib/include/errors.h | 8 +- source/lib/include/neighbor_list.h | 6 +- source/lmp/pppm_dplr.h | 2 +- source/md/include/Integrator.h | 2 +- source/md/include/Tabulated.h | 2 +- source/md/include/Trajectory.h | 4 +- source/md/include/UnitManager.h | 2 +- source/op/add_flt_nvnmd.cc | 2 +- source/op/copy_flt_nvnmd.cc | 2 +- source/op/dotmul_flt_nvnmd.cc | 2 +- source/op/flt_nvnmd.cc | 2 +- source/op/mul_flt_nvnmd.cc | 2 +- source/tests/common.py | 10 +- 62 files changed, 489 insertions(+), 442 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7e0b0a4a76..9b3f099acb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - 
repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: trailing-whitespace exclude: "^.+\\.pbtxt$" @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.3.5 + rev: v0.4.10 hooks: - id: ruff args: ["--fix"] @@ -52,7 +52,7 @@ repos: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v18.1.2 + rev: v18.1.7 hooks: - id: clang-format exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc @@ -75,6 +75,25 @@ repos: hooks: - id: cmake-format #- id: cmake-lint + - repo: https://github.com/njzjz/mirrors-bibtex-tidy + rev: v1.13.0 + hooks: + - id: bibtex-tidy + args: + - --curly + - --numeric + - --align=13 + - --blank-lines + # disable sort: the order of keys and fields has explict meanings + #- --sort=key + - --duplicates=key,doi,citation,abstract + - --merge=combine + #- --sort-fields + #- --strip-comments + - --trailing-commas + - --encode-urls + - --remove-empty-fields + - --wrap=80 # license header - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/CITATIONS.bib b/CITATIONS.bib index ac682b28f7..a456e16bf4 100644 --- a/CITATIONS.bib +++ b/CITATIONS.bib @@ -1,272 +1,339 @@ -The proposed feature of each article is described in the "annote" field. -Please cite a article if any feature is used - -@article{Wang_ComputPhysCommun_2018_v228_p178, - annote = {general purpose}, - author = {Wang, Han and Zhang, Linfeng and Han, Jiequn and E, Weinan}, - doi = {10.1016/j.cpc.2018.03.016}, - year = 2018, - month = {jul}, - publisher = {Elsevier {BV}}, - volume = 228, - journal = {Comput. Phys. Comm.}, - title = {{DeePMD-kit: A deep learning package for many-body potential - energy representation and molecular dynamics}}, - pages = {178--184}, -} - -@Article{Zeng_JChemPhys_2023_v159_p054801, - annote = {general purpose}, - title = {{DeePMD-kit v2: A software package for deep potential models}}, - author = {Jinzhe Zeng and Duo Zhang and Denghui Lu and Pinghui Mo and Zeyu Li - and Yixiao Chen and Mari{\'a}n Rynik and Li'ang Huang and Ziyao Li and - Shaochen Shi and Yingze Wang and Haotian Ye and Ping Tuo and Jiabin - Yang and Ye Ding and Yifan Li and Davide Tisi and Qiyu Zeng and Han - Bao and Yu Xia and Jiameng Huang and Koki Muraoka and Yibo Wang and - Junhan Chang and Fengbo Yuan and Sigbj{\o}rn L{\o}land Bore and Chun - Cai and Yinnian Lin and Bo Wang and Jiayan Xu and Jia-Xin Zhu and - Chenxing Luo and Yuzhi Zhang and Rhys E A Goodall and Wenshuo Liang - and Anurag Kumar Singh and Sikai Yao and Jingchao Zhang and Renata - Wentzcovitch and Jiequn Han and Jie Liu and Weile Jia and Darrin M - York and Weinan E and Roberto Car and Linfeng Zhang and Han Wang}, - journal = {J. Chem. Phys.}, - volume = 159, - issue = 5, - year = 2023, - pages = 054801, - doi = {10.1063/5.0155600}, -} - - -@article{Lu_CompPhysCommun_2021_v259_p107624, - annote = {GPU support}, - title={{86 PFLOPS Deep Potential Molecular Dynamics simulation of 100 million - atoms with ab initio accuracy}}, - author={Lu, Denghui and Wang, Han and Chen, Mohan and Lin, Lin and Car, Roberto - and E, Weinan and Jia, Weile and Zhang, Linfeng}, - journal={Comput. Phys. 
Comm.}, - volume={259}, - pages={107624}, - year={2021}, - publisher={Elsevier}, - doi={10.1016/j.cpc.2020.107624}, -} - -@article{Zhang_PhysRevLett_2018_v120_p143001, - annote = {local frame (loc_frame)}, - author = {Linfeng Zhang and Jiequn Han and Han Wang and - Roberto Car and Weinan E}, - journal = {Phys. Rev. Lett.}, - number = {14}, - pages = {143001}, - publisher = {APS}, - title = {{Deep potential molecular dynamics: a scalable model - with the accuracy of quantum mechanics}}, - volume = {120}, - year = {2018}, - doi = {10.1103/PhysRevLett.120.143001} -} - -@incollection{Zhang_BookChap_NIPS_2018_v31_p4436, - annote = {DeepPot-SE (se_e2_a, se_e2_r, se_e3, se_atten)}, - title = {{End-to-end Symmetry Preserving Inter-atomic Potential Energy Model - for Finite and Extended Systems}}, - author = {Zhang, Linfeng and Han, Jiequn and Wang, Han and Saidi, Wissam and - Car, Roberto and E, Weinan}, - booktitle = {Advances in Neural Information Processing Systems 31}, - editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. - Cesa-Bianchi and R. Garnett}, - pages = {4436--4446}, - year = {2018}, - publisher = {Curran Associates, Inc.}, - url = {https://dl.acm.org/doi/10.5555/3327345.3327356} -} - -@Article{Wang_NuclFusion_2022_v62_p126013, - annote = {three-body embedding DeepPot-SE (se_e3)}, - author = {Xiaoyang Wang and Yinan Wang and Linfeng Zhang and Fuzhi Dai and Han - Wang}, - title = {{A tungsten deep neural-network potential for simulating mechanical - property degradation under fusion service environment}}, - journal = {Nucl. Fusion}, - year = 2022, - volume = 62, - issue = 12, - pages = 126013, - doi = {10.1088/1741-4326/ac888b}, -} - -@misc{Zhang_2022_DPA1, - annote = {attention-based descriptor}, - author = {Zhang, Duo and Bi, Hangrui and Dai, Fu-Zhi and Jiang, Wanrun and Zhang, Linfeng and Wang, Han}, - title = {{DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular Simulation}}, - publisher = {arXiv}, - year = {2022}, - doi = {10.48550/arXiv.2208.08236}, -} - -@article{Zhang_PhysPlasmas_2020_v27_p122704, - annote = {frame-specific parameters (e.g. electronic temperature)}, - author = {Zhang, Yuzhi and Gao, Chang and Liu, Qianrui and Zhang, Linfeng and Wang, Han and Chen, Mohan}, - title = {{Warm dense matter simulation via electron temperature dependent deep potential molecular dynamics}}, - journal = {Phys. Plasmas}, - volume = {27}, - number = {12}, - pages = {122704}, - year = {2020}, - month = {12}, - doi = {10.1063/5.0023265}, -} - -@misc{Zeng_2023_TTMDPMD, - annote = {atom-specific parameter (e.g. electron temperature) }, - author = {Zeng, Qiyu and Chen, Bo and Zhang, Shen and Kang, Dongdong and Wang, Han and Yu, Xiaoxiang and Dai, Jiayu}, - title = {{Full-scale ab initio simulations of laser-driven atomistic dynamics}}, - publisher = {arXiv}, - year = {2023}, - doi = {10.48550/arXiv.2308.13863}, -} - -@article{Zhang_PhysRevB_2020_v102_p41121, - annote = {fit dipole}, - title={{Deep neural network for the dielectric response of insulators}}, - author={Zhang, Linfeng and Chen, Mohan and Wu, Xifan and Wang, Han and E, Weinan and Car, Roberto}, - journal={Phys. Rev. 
B}, - volume={102}, - number={4}, - pages={041121}, - year={2020}, - publisher={APS}, - doi={10.1103/PhysRevB.102.041121} -} - -@article{Sommers_PhysChemChemPhys_2020_v22_p10592, - annote = {fit polarizability}, - title={{Raman spectrum and polarizability of liquid water from deep neural networks}}, - author={Sommers, Grace M and Andrade, Marcos F Calegari and Zhang, Linfeng and Wang, Han and Car, Roberto}, - journal={Phys. Chem. Chem. Phys.}, - volume={22}, - number={19}, - pages={10592--10602}, - year={2020}, - publisher={Royal Society of Chemistry}, - doi={10.1039/D0CP01893G} -} - -@Article{Zeng_JChemTheoryComput_2023_v19_p1261, - annote = {fit relative energies}, - author = {Jinzhe Zeng and Yujun Tao and Timothy J Giese and Darrin M York}, - title = {{QD{\pi}: A Quantum Deep Potential Interaction Model for - Drug Discovery}}, - journal = {J. Chem. Theory Comput.}, - year = 2023, - volume = 19, - issue = 4, - pages = {1261--1275}, - doi = {10.1021/acs.jctc.2c01172}, -} - -@Article{Zeng_PhysRevB_2022_v105_p174109, - annote = {fit density of states}, - author = {Qiyu Zeng and Bo Chen and Xiaoxiang Yu and Shen Zhang and Dongdong - Kang and Han Wang and Jiayu Dai}, - title = {{Towards large-scale and spatiotemporally resolved diagnosis of - electronic density of states by deep learning}}, - journal = {Phys. Rev. B}, - year = 2022, - volume = 105, - issue = 17, - pages = 174109, - doi = {10.1103/PhysRevB.105.174109}, -} - -@Article{Zhang_JChemPhys_2022_v156_p124107, - annote = {DPLR, se_e2_r, hybrid descriptor}, - author = {Linfeng Zhang and Han Wang and Maria Carolina Muniz and Athanassios Z - Panagiotopoulos and Roberto Car and Weinan E}, - title = {{A deep potential model with long-range electrostatic interactions}}, - journal = {J. Chem. Phys.}, - year = 2022, - volume = 156, - issue = 12, - pages = 124107, - doi = {10.1063/5.0083669}, -} - -@article{Zeng_JChemTheoryComput_2021_v17_p6993, - annote= {DPRc}, - title={{Development of Range-Corrected Deep Learning Potentials for Fast, Accurate Quantum Mechanical/molecular Mechanical Simulations of Chemical Reactions in Solution}}, - author={Zeng, Jinzhe and Giese, Timothy J and Ekesan, {\c{S}}{\"o}len and York, Darrin M}, - journal={J. Chem. Theory Comput.}, - year=2021, - volume=17, - issue=11, - pages={6993-7009}, - doi = {10.1021/acs.jctc.1c00201}, -} - -@article{Wang_ApplPhysLett_2019_v114_p244101, - annote = {Interpolation with a pair-wise potential}, - title={{Deep learning inter-atomic potential model for accurate irradiation damage simulations}}, - author={Wang, Hao and Guo, Xun and Zhang, Linfeng and Wang, Han and Xue, Jianming}, - journal={Appl. Phys. Lett.}, - volume={114}, - number={24}, - pages={244101}, - year={2019}, - publisher={AIP Publishing LLC}, - doi={10.1063/1.5098061}, -} - -@article{Zhang_PhysRevMater_2019_v3_p23804, - annote = {model deviation}, - title = {{Active learning of uniformly accurate interatomic potentials for materials simulation}}, - author = {Linfeng Zhang and De-Ye Lin and Han Wang and Roberto Car and Weinan E}, - journal = {Phys. Rev. Mater.}, - volume = 3, - issue = 2, - pages = 23804, - year = 2019, - publisher = {American Physical Society}, - doi = {10.1103/PhysRevMaterials.3.023804}, -} - -@article{Lu_JChemTheoryComput_2022_v18_p5555, - annote = {DP Compress}, - author = {Denghui Lu and Wanrun Jiang and Yixiao Chen and Linfeng Zhang and - Weile Jia and Han Wang and Mohan Chen}, - title = {{DP Compress: A Model Compression Scheme for Generating Efficient Deep - Potential Models}}, - journal = {J. 
Chem. Theory Comput.}, - year = 2022, - volume=18, - issue=9, - pages={5555--5567}, - doi = {10.1021/acs.jctc.2c00102}, -} - -@article{Mo_npjComputMater_2022_v8_p107, - annote = {NVNMD}, - author = {Pinghui Mo and Chang Li and Dan Zhao and Yujia Zhang and Mengchao Shi - and Junhua Li and Jie Liu}, - title = {{Accurate and efficient molecular dynamics based on machine learning - and non von Neumann architecture}}, - journal = {npj Comput. Mater.}, - year = 2022, - volume = 8, - issue = 1, - pages = 107, - doi = {10.1038/s41524-022-00773-z}, -} - -@article{Zeng_EnergyFuels_2021_v35_p762, - annote = {relative or atomic model deviation}, - author = {Jinzhe Zeng and Linfeng Zhang and Han Wang and Tong Zhu}, - title = {{Exploring the Chemical Space of Linear Alkane Pyrolysis via Deep Potential GENerator}}, - journal = {Energy \& Fuels}, - volume = 35, - number = 1, - pages = {762--769}, - year = 2021, - doi = {10.1021/acs.energyfuels.0c03211}, -} +The proposed feature of each article is described in the "annote" field. +Please cite a article if any feature is used +@article{Wang_ComputPhysCommun_2018_v228_p178, + annote = {general purpose}, + author = {Wang, Han and Zhang, Linfeng and Han, Jiequn and E, Weinan}, + doi = {10.1016/j.cpc.2018.03.016}, + year = 2018, + month = {jul}, + publisher = {Elsevier {BV}}, + volume = 228, + journal = {Comput. Phys. Comm.}, + title = { + {DeePMD-kit: A deep learning package for many-body potential energy + representation and molecular dynamics} + }, + pages = {178--184}, +} + +@article{Zeng_JChemPhys_2023_v159_p054801, + annote = {general purpose}, + title = {{DeePMD-kit v2: A software package for deep potential models}}, + author = { + Jinzhe Zeng and Duo Zhang and Denghui Lu and Pinghui Mo and Zeyu Li and + Yixiao Chen and Mari{\'a}n Rynik and Li'ang Huang and Ziyao Li and Shaochen + Shi and Yingze Wang and Haotian Ye and Ping Tuo and Jiabin Yang and Ye Ding + and Yifan Li and Davide Tisi and Qiyu Zeng and Han Bao and Yu Xia and + Jiameng Huang and Koki Muraoka and Yibo Wang and Junhan Chang and Fengbo + Yuan and Sigbj{\o}rn L{\o}land Bore and Chun Cai and Yinnian Lin and Bo + Wang and Jiayan Xu and Jia-Xin Zhu and Chenxing Luo and Yuzhi Zhang and + Rhys E A Goodall and Wenshuo Liang and Anurag Kumar Singh and Sikai Yao and + Jingchao Zhang and Renata Wentzcovitch and Jiequn Han and Jie Liu and Weile + Jia and Darrin M York and Weinan E and Roberto Car and Linfeng Zhang and + Han Wang + }, + journal = {J. Chem. Phys.}, + volume = 159, + issue = 5, + year = 2023, + pages = 054801, + doi = {10.1063/5.0155600}, +} + +@article{Lu_CompPhysCommun_2021_v259_p107624, + annote = {GPU support}, + title = { + {86 PFLOPS Deep Potential Molecular Dynamics simulation of 100 million + atoms with ab initio accuracy} + }, + author = { + Lu, Denghui and Wang, Han and Chen, Mohan and Lin, Lin and Car, Roberto and + E, Weinan and Jia, Weile and Zhang, Linfeng + }, + journal = {Comput. Phys. Comm.}, + volume = 259, + pages = 107624, + year = 2021, + publisher = {Elsevier}, + doi = {10.1016/j.cpc.2020.107624}, +} + +@article{Zhang_PhysRevLett_2018_v120_p143001, + annote = {local frame (loc\_frame)}, + author = {Linfeng Zhang and Jiequn Han and Han Wang and Roberto Car and Weinan E}, + journal = {Phys. Rev. 
Lett.}, + number = 14, + pages = 143001, + publisher = {APS}, + title = { + {Deep potential molecular dynamics: a scalable model with the accuracy of + quantum mechanics} + }, + volume = 120, + year = 2018, + doi = {10.1103/PhysRevLett.120.143001}, +} + +@incollection{Zhang_BookChap_NIPS_2018_v31_p4436, + annote = {DeepPot-SE (se\_e2\_a, se\_e2\_r, se\_e3, se\_atten)}, + title = { + {End-to-end Symmetry Preserving Inter-atomic Potential Energy Model for + Finite and Extended Systems} + }, + author = { + Zhang, Linfeng and Han, Jiequn and Wang, Han and Saidi, Wissam and Car, + Roberto and E, Weinan + }, + booktitle = {Advances in Neural Information Processing Systems 31}, + editor = { + S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. + Cesa-Bianchi and R. Garnett + }, + pages = {4436--4446}, + year = 2018, + publisher = {Curran Associates, Inc.}, + url = {https://dl.acm.org/doi/10.5555/3327345.3327356}, +} + +@article{Wang_NuclFusion_2022_v62_p126013, + annote = {three-body embedding DeepPot-SE (se\_e3)}, + author = {Xiaoyang Wang and Yinan Wang and Linfeng Zhang and Fuzhi Dai and Han Wang}, + title = { + {A tungsten deep neural-network potential for simulating mechanical + property degradation under fusion service environment} + }, + journal = {Nucl. Fusion}, + year = 2022, + volume = 62, + issue = 12, + pages = 126013, + doi = {10.1088/1741-4326/ac888b}, +} + +@misc{Zhang_2022_DPA1, + annote = {attention-based descriptor}, + author = { + Zhang, Duo and Bi, Hangrui and Dai, Fu-Zhi and Jiang, Wanrun and Zhang, + Linfeng and Wang, Han + }, + title = { + {DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular + Simulation} + }, + publisher = {arXiv}, + year = 2022, + doi = {10.48550/arXiv.2208.08236}, +} + +@article{Zhang_PhysPlasmas_2020_v27_p122704, + annote = {frame-specific parameters (e.g. electronic temperature)}, + author = { + Zhang, Yuzhi and Gao, Chang and Liu, Qianrui and Zhang, Linfeng and Wang, + Han and Chen, Mohan + }, + title = { + {Warm dense matter simulation via electron temperature dependent deep + potential molecular dynamics} + }, + journal = {Phys. Plasmas}, + volume = 27, + number = 12, + pages = 122704, + year = 2020, + month = 12, + doi = {10.1063/5.0023265}, +} + +@misc{Zeng_2023_TTMDPMD, + annote = {atom-specific parameter (e.g. electron temperature)}, + author = { + Zeng, Qiyu and Chen, Bo and Zhang, Shen and Kang, Dongdong and Wang, Han + and Yu, Xiaoxiang and Dai, Jiayu + }, + title = {{Full-scale ab initio simulations of laser-driven atomistic dynamics}}, + publisher = {arXiv}, + year = 2023, + doi = {10.48550/arXiv.2308.13863}, +} + +@article{Zhang_PhysRevB_2020_v102_p41121, + annote = {fit dipole}, + title = {{Deep neural network for the dielectric response of insulators}}, + author = { + Zhang, Linfeng and Chen, Mohan and Wu, Xifan and Wang, Han and E, Weinan + and Car, Roberto + }, + journal = {Phys. Rev. B}, + volume = 102, + number = 4, + pages = {041121}, + year = 2020, + publisher = {APS}, + doi = {10.1103/PhysRevB.102.041121}, +} + +@article{Sommers_PhysChemChemPhys_2020_v22_p10592, + annote = {fit polarizability}, + title = { + {Raman spectrum and polarizability of liquid water from deep neural + networks} + }, + author = { + Sommers, Grace M and Andrade, Marcos F Calegari and Zhang, Linfeng and + Wang, Han and Car, Roberto + }, + journal = {Phys. Chem. Chem. 
Phys.}, + volume = 22, + number = 19, + pages = {10592--10602}, + year = 2020, + publisher = {Royal Society of Chemistry}, + doi = {10.1039/D0CP01893G}, +} + +@article{Zeng_JChemTheoryComput_2023_v19_p1261, + annote = {fit relative energies}, + author = {Jinzhe Zeng and Yujun Tao and Timothy J Giese and Darrin M York}, + title = {{QD{\pi}: A Quantum Deep Potential Interaction Model for Drug Discovery}}, + journal = {J. Chem. Theory Comput.}, + year = 2023, + volume = 19, + issue = 4, + pages = {1261--1275}, + doi = {10.1021/acs.jctc.2c01172}, +} + +@article{Zeng_PhysRevB_2022_v105_p174109, + annote = {fit density of states}, + author = { + Qiyu Zeng and Bo Chen and Xiaoxiang Yu and Shen Zhang and Dongdong Kang and + Han Wang and Jiayu Dai + }, + title = { + {Towards large-scale and spatiotemporally resolved diagnosis of electronic + density of states by deep learning} + }, + journal = {Phys. Rev. B}, + year = 2022, + volume = 105, + issue = 17, + pages = 174109, + doi = {10.1103/PhysRevB.105.174109}, +} + +@article{Zhang_JChemPhys_2022_v156_p124107, + annote = {DPLR, se\_e2\_r, hybrid descriptor}, + author = { + Linfeng Zhang and Han Wang and Maria Carolina Muniz and Athanassios Z + Panagiotopoulos and Roberto Car and Weinan E + }, + title = {{A deep potential model with long-range electrostatic interactions}}, + journal = {J. Chem. Phys.}, + year = 2022, + volume = 156, + issue = 12, + pages = 124107, + doi = {10.1063/5.0083669}, +} + +@article{Zeng_JChemTheoryComput_2021_v17_p6993, + annote = {DPRc}, + title = { + {Development of Range-Corrected Deep Learning Potentials for Fast, Accurate + Quantum Mechanical/molecular Mechanical Simulations of Chemical Reactions + in Solution} + }, + author = { + Zeng, Jinzhe and Giese, Timothy J and Ekesan, {\c{S}}{\"o}len and York, + Darrin M + }, + journal = {J. Chem. Theory Comput.}, + year = 2021, + volume = 17, + issue = 11, + pages = {6993--7009}, + doi = {10.1021/acs.jctc.1c00201}, +} + +@article{Wang_ApplPhysLett_2019_v114_p244101, + annote = {Interpolation with a pair-wise potential}, + title = { + {Deep learning inter-atomic potential model for accurate irradiation damage + simulations} + }, + author = {Wang, Hao and Guo, Xun and Zhang, Linfeng and Wang, Han and Xue, Jianming}, + journal = {Appl. Phys. Lett.}, + volume = 114, + number = 24, + pages = 244101, + year = 2019, + publisher = {AIP Publishing LLC}, + doi = {10.1063/1.5098061}, +} + +@article{Zhang_PhysRevMater_2019_v3_p23804, + annote = {model deviation}, + title = { + {Active learning of uniformly accurate interatomic potentials for materials + simulation} + }, + author = {Linfeng Zhang and De-Ye Lin and Han Wang and Roberto Car and Weinan E}, + journal = {Phys. Rev. Mater.}, + volume = 3, + issue = 2, + pages = 23804, + year = 2019, + publisher = {American Physical Society}, + doi = {10.1103/PhysRevMaterials.3.023804}, +} + +@article{Lu_JChemTheoryComput_2022_v18_p5555, + annote = {DP Compress}, + author = { + Denghui Lu and Wanrun Jiang and Yixiao Chen and Linfeng Zhang and Weile Jia + and Han Wang and Mohan Chen + }, + title = { + {DP Compress: A Model Compression Scheme for Generating Efficient Deep + Potential Models} + }, + journal = {J. Chem. 
Theory Comput.}, + year = 2022, + volume = 18, + issue = 9, + pages = {5555--5567}, + doi = {10.1021/acs.jctc.2c00102}, +} + +@article{Mo_npjComputMater_2022_v8_p107, + annote = {NVNMD}, + author = { + Pinghui Mo and Chang Li and Dan Zhao and Yujia Zhang and Mengchao Shi and + Junhua Li and Jie Liu + }, + title = { + {Accurate and efficient molecular dynamics based on machine learning and + non von Neumann architecture} + }, + journal = {npj Comput. Mater.}, + year = 2022, + volume = 8, + issue = 1, + pages = 107, + doi = {10.1038/s41524-022-00773-z}, +} + +@article{Zeng_EnergyFuels_2021_v35_p762, + annote = {relative or atomic model deviation}, + author = {Jinzhe Zeng and Linfeng Zhang and Han Wang and Tong Zhu}, + title = { + {Exploring the Chemical Space of Linear Alkane Pyrolysis via Deep Potential + GENerator} + }, + journal = {Energy \& Fuels}, + volume = 35, + number = 1, + pages = {762--769}, + year = 2021, + doi = {10.1021/acs.energyfuels.0c03211}, +} diff --git a/backend/read_env.py b/backend/read_env.py index 079211d4d7..06f6f0c7e2 100644 --- a/backend/read_env.py +++ b/backend/read_env.py @@ -60,7 +60,7 @@ def get_argument_from_env() -> Tuple[str, list, list, dict, str]: if hipcc_flags is not None: os.environ["HIPFLAGS"] = os.environ.get("HIPFLAGS", "") + " " + hipcc_flags else: - raise RuntimeError("Unsupported DP_VARIANT option: %s" % dp_variant) + raise RuntimeError(f"Unsupported DP_VARIANT option: {dp_variant}") if os.environ.get("DP_BUILD_TESTING", "0") == "1": cmake_args.append("-DBUILD_TESTING:BOOL=TRUE") diff --git a/data/raw/copy_raw.py b/data/raw/copy_raw.py index 69ccdf5c63..2a4078df4d 100755 --- a/data/raw/copy_raw.py +++ b/data/raw/copy_raw.py @@ -85,7 +85,7 @@ def _main(): ) args = parser.parse_args() - print("# copy the system by %s copies" % args.ncopies) # noqa: T201 + print(f"# copy the system by {args.ncopies} copies") # noqa: T201 assert np.all( np.array(args.ncopies, dtype=int) >= np.array([1, 1, 1], dtype=int) ), "number of copies should be larger than or equal to 1" diff --git a/deepmd/cluster/local.py b/deepmd/cluster/local.py index 3c12c9dc85..694f6b8b56 100644 --- a/deepmd/cluster/local.py +++ b/deepmd/cluster/local.py @@ -43,7 +43,7 @@ def get_gpus(): stdout, stderr = p.communicate() if p.returncode != 0: decoded = stderr.decode("UTF-8") - raise RuntimeError("Failed to detect availbe GPUs due to:\n%s" % decoded) + raise RuntimeError(f"Failed to detect availbe GPUs due to:\n{decoded}") decoded = stdout.decode("UTF-8").strip() num_gpus = int(decoded) return list(range(num_gpus)) if num_gpus > 0 else None diff --git a/deepmd/descriptor/descriptor.py b/deepmd/descriptor/descriptor.py index 70c6cb2758..3e3115b9dd 100644 --- a/deepmd/descriptor/descriptor.py +++ b/deepmd/descriptor/descriptor.py @@ -283,7 +283,7 @@ def enable_compression( This method is called by others when the descriptor supported compression. """ raise NotImplementedError( - "Descriptor %s doesn't support compression!" % type(self).__name__ + f"Descriptor {type(self).__name__} doesn't support compression!" ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: @@ -299,8 +299,7 @@ def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: This method is called by others when the descriptor supported compression. """ raise NotImplementedError( - "Descriptor %s doesn't support mixed precision training!" - % type(self).__name__ + f"Descriptor {type(self).__name__} doesn't support mixed precision training!" 
) @abstractmethod @@ -351,8 +350,7 @@ def init_variables( This method is called by others when the descriptor supported initialization from the given variables. """ raise NotImplementedError( - "Descriptor %s doesn't support initialization from the given variables!" - % type(self).__name__ + f"Descriptor {type(self).__name__} doesn't support initialization from the given variables!" ) def get_tensor_names(self, suffix: str = "") -> Tuple[str]: @@ -369,7 +367,7 @@ def get_tensor_names(self, suffix: str = "") -> Tuple[str]: Names of tensors """ raise NotImplementedError( - "Descriptor %s doesn't support this property!" % type(self).__name__ + f"Descriptor {type(self).__name__} doesn't support this property!" ) def pass_tensors_from_frz_model( @@ -389,7 +387,7 @@ def pass_tensors_from_frz_model( :meth:`get_tensor_names`. """ raise NotImplementedError( - "Descriptor %s doesn't support this method!" % type(self).__name__ + f"Descriptor {type(self).__name__} doesn't support this method!" ) def build_type_exclude_mask( diff --git a/deepmd/descriptor/loc_frame.py b/deepmd/descriptor/loc_frame.py index ccb66c864d..410cd038ac 100644 --- a/deepmd/descriptor/loc_frame.py +++ b/deepmd/descriptor/loc_frame.py @@ -427,12 +427,8 @@ def init_variables( suffix : str, optional The suffix of the scope """ - self.davg = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_avg" % suffix - ) - self.dstd = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_std" % suffix - ) + self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg") + self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") @classmethod def update_sel(cls, global_jdata: dict, local_jdata: dict): diff --git a/deepmd/descriptor/se.py b/deepmd/descriptor/se.py index 598f6f9ff8..064e1c3bf2 100644 --- a/deepmd/descriptor/se.py +++ b/deepmd/descriptor/se.py @@ -130,12 +130,8 @@ def init_variables( self.embedding_net_variables = get_embedding_net_variables_from_graph_def( graph_def, suffix=suffix ) - self.davg = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_avg" % suffix - ) - self.dstd = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_std" % suffix - ) + self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg") + self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") @property def precision(self) -> tf.DType: diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index ddbf9a34e5..f3e62d3672 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -540,12 +540,8 @@ def enable_compression( min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2 ) - self.davg = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_avg" % suffix - ) - self.dstd = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_std" % suffix - ) + self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg") + self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: """Reveive the mixed precision setting. 
@@ -1291,14 +1287,14 @@ def init_variables( super().init_variables(graph=graph, graph_def=graph_def, suffix=suffix) try: self.original_sel = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/original_sel" % suffix + graph, f"descrpt_attr{suffix}/original_sel" ) except GraphWithoutTensorError: # original_sel is not restored in old graphs, assume sel never changed before pass # check sel == original sel? try: - sel = get_tensor_by_name_from_graph(graph, "descrpt_attr%s/sel" % suffix) + sel = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/sel") except GraphWithoutTensorError: # sel is not restored in old graphs pass diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index a9058cbdef..5615863254 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -447,12 +447,8 @@ def enable_compression( ) self.two_embd = make_data(self, self.final_type_embedding) - self.davg = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_avg" % suffix - ) - self.dstd = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_std" % suffix - ) + self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg") + self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") def build( self, diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index b603d7098b..d5f8e59d18 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -365,12 +365,8 @@ def enable_compression( min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2 ) - self.davg = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_avg" % suffix - ) - self.dstd = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_std" % suffix - ) + self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg") + self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") def build( self, diff --git a/deepmd/descriptor/se_t.py b/deepmd/descriptor/se_t.py index 86e5ca6a1e..906ca144c4 100644 --- a/deepmd/descriptor/se_t.py +++ b/deepmd/descriptor/se_t.py @@ -368,12 +368,8 @@ def enable_compression( min_nbor_dist, table_extrapolate, table_stride_1 * 10, table_stride_2 * 10 ) - self.davg = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_avg" % suffix - ) - self.dstd = get_tensor_by_name_from_graph( - graph, "descrpt_attr%s/t_std" % suffix - ) + self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg") + self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") def build( self, diff --git a/deepmd/entrypoints/compress.py b/deepmd/entrypoints/compress.py index 61d6dfcb44..85f5ed4e74 100644 --- a/deepmd/entrypoints/compress.py +++ b/deepmd/entrypoints/compress.py @@ -101,11 +101,10 @@ def compress( except GraphWithoutTensorError as e: if training_script is None: raise RuntimeError( - "The input frozen model: %s has no training script or min_nbor_dist information, " + f"The input frozen model: {input} has no training script or min_nbor_dist information, " "which is not supported by the model compression interface. " "Please consider using the --training-script command within the model compression interface to provide the training script of the input frozen model. " "Note that the input training script must contain the correct path to the training data." 
- % input ) from e elif not os.path.exists(training_script): raise RuntimeError( @@ -160,10 +159,10 @@ def compress( ) except GraphTooLargeError as e: raise RuntimeError( - "The uniform step size of the tabulation's first table is %f, " + f"The uniform step size of the tabulation's first table is {step:f}, " "which is too small. This leads to a very large graph size, " "exceeding protobuf's limitation (2 GB). You should try to " - "increase the step size." % step + "increase the step size." ) from e # reset the graph, otherwise the size limitation will be only 2 GB / 2 = 1 GB @@ -176,10 +175,10 @@ def compress( freeze(checkpoint_folder=checkpoint_folder, output=output, node_names=None) except GraphTooLargeError as e: raise RuntimeError( - "The uniform step size of the tabulation's first table is %f, " + f"The uniform step size of the tabulation's first table is {step:f}, " "which is too small. This leads to a very large graph size, " "exceeding protobuf's limitation (2 GB). You should try to " - "increase the step size." % step + "increase the step size." ) from e diff --git a/deepmd/entrypoints/freeze.py b/deepmd/entrypoints/freeze.py index f13ea85ab0..ddb9d3cf68 100755 --- a/deepmd/entrypoints/freeze.py +++ b/deepmd/entrypoints/freeze.py @@ -368,9 +368,9 @@ def freeze_graph( different_set -= set(optional_node) if different_set: log.warning( - "The following nodes are not in the graph: %s. " + f"The following nodes are not in the graph: {different_set}. " "Skip freezeing these nodes. You may be freezing " - "a checkpoint generated by an old version." % different_set + "a checkpoint generated by an old version." ) # use intersection as output list output_node = list(set(output_node) & set(input_node)) diff --git a/deepmd/entrypoints/neighbor_stat.py b/deepmd/entrypoints/neighbor_stat.py index 28cab00ad2..1c082322b5 100644 --- a/deepmd/entrypoints/neighbor_stat.py +++ b/deepmd/entrypoints/neighbor_stat.py @@ -59,6 +59,6 @@ def neighbor_stat( data.get_batch() nei = NeighborStat(data.get_ntypes(), rcut, one_type=one_type) min_nbor_dist, max_nbor_size = nei.get_stat(data) - log.info("min_nbor_dist: %f" % min_nbor_dist) - log.info("max_nbor_size: %s" % str(max_nbor_size)) + log.info(f"min_nbor_dist: {min_nbor_dist:f}") + log.info(f"max_nbor_size: {max_nbor_size!s}") return min_nbor_dist, max_nbor_size diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 7bd4883925..1a8e507639 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -410,14 +410,14 @@ def test_ener( save_txt_file( detail_path.with_suffix(".e.out"), pe, - header="%s: data_e pred_e" % system, + header=f"{system}: data_e pred_e", append=append_detail, ) pe_atom = pe / natoms save_txt_file( detail_path.with_suffix(".e_peratom.out"), pe_atom, - header="%s: data_e pred_e" % system, + header=f"{system}: data_e pred_e", append=append_detail, ) if dp.get_ntypes_spin() == 0: @@ -431,7 +431,7 @@ def test_ener( save_txt_file( detail_path.with_suffix(".f.out"), pf, - header="%s: data_fx data_fy data_fz pred_fx pred_fy pred_fz" % system, + header=f"{system}: data_fx data_fy data_fz pred_fx pred_fy pred_fz", append=append_detail, ) else: @@ -446,14 +446,13 @@ def test_ener( save_txt_file( detail_path.with_suffix(".fr.out"), pf_real, - header="%s: data_fx data_fy data_fz pred_fx pred_fy pred_fz" % system, + header=f"{system}: data_fx data_fy data_fz pred_fx pred_fy pred_fz", append=append_detail, ) save_txt_file( detail_path.with_suffix(".fm.out"), pf_mag, - header="%s: data_fmx data_fmy data_fmz 
pred_fmx pred_fmy pred_fmz" - % system, + header=f"{system}: data_fmx data_fmy data_fmz pred_fmx pred_fmy pred_fmz", append=append_detail, ) pv = np.concatenate( diff --git a/deepmd/fit/dos.py b/deepmd/fit/dos.py index bbf7d39a09..57059b1387 100644 --- a/deepmd/fit/dos.py +++ b/deepmd/fit/dos.py @@ -587,21 +587,21 @@ def init_variables( self.fitting_net_variables.update(shared_variables) if self.numb_fparam > 0: self.fparam_avg = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_fparam_avg" % suffix + graph, f"fitting_attr{suffix}/t_fparam_avg" ) self.fparam_inv_std = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_fparam_istd" % suffix + graph, f"fitting_attr{suffix}/t_fparam_istd" ) if self.numb_aparam > 0: self.aparam_avg = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_aparam_avg" % suffix + graph, f"fitting_attr{suffix}/t_aparam_avg" ) self.aparam_inv_std = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_aparam_istd" % suffix + graph, f"fitting_attr{suffix}/t_aparam_istd" ) try: self.bias_dos = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_bias_dos" % suffix + graph, f"fitting_attr{suffix}/t_bias_dos" ) except GraphWithoutTensorError: # for compatibility, old models has no t_bias_dos diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py index 4610384d7e..e417076fce 100644 --- a/deepmd/fit/ener.py +++ b/deepmd/fit/ener.py @@ -754,21 +754,21 @@ def init_variables( self.fitting_net_variables.update(shared_variables) if self.numb_fparam > 0: self.fparam_avg = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_fparam_avg" % suffix + graph, f"fitting_attr{suffix}/t_fparam_avg" ) self.fparam_inv_std = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_fparam_istd" % suffix + graph, f"fitting_attr{suffix}/t_fparam_istd" ) if self.numb_aparam > 0: self.aparam_avg = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_aparam_avg" % suffix + graph, f"fitting_attr{suffix}/t_aparam_avg" ) self.aparam_inv_std = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_aparam_istd" % suffix + graph, f"fitting_attr{suffix}/t_aparam_istd" ) try: self.bias_atom_e = get_tensor_by_name_from_graph( - graph, "fitting_attr%s/t_bias_atom_e" % suffix + graph, f"fitting_attr{suffix}/t_bias_atom_e" ) except GraphWithoutTensorError: # for compatibility, old models has no t_bias_atom_e diff --git a/deepmd/fit/fitting.py b/deepmd/fit/fitting.py index a467ec1201..61af88a82b 100644 --- a/deepmd/fit/fitting.py +++ b/deepmd/fit/fitting.py @@ -82,8 +82,7 @@ def init_variables( This method is called by others when the fitting supported initialization from the given variables. """ raise NotImplementedError( - "Fitting %s doesn't support initialization from the given variables!" - % type(self).__name__ + f"Fitting {type(self).__name__} doesn't support initialization from the given variables!" 
) @abstractmethod diff --git a/deepmd/infer/deep_pot.py b/deepmd/infer/deep_pot.py index 81cfdde7a8..ea72d1330b 100644 --- a/deepmd/infer/deep_pot.py +++ b/deepmd/infer/deep_pot.py @@ -124,7 +124,7 @@ def __init__( # check if the graph has these operations: # if yes add them - if ("%s/t_efield" % load_prefix) in operations: + if (f"{load_prefix}/t_efield") in operations: self.tensors.update({"t_efield": "t_efield:0"}) self.has_efield = True else: @@ -132,7 +132,7 @@ def __init__( self.t_efield = None self.has_efield = False - if ("%s/t_fparam" % load_prefix) in operations: + if (f"{load_prefix}/t_fparam") in operations: self.tensors.update({"t_fparam": "t_fparam:0"}) self.has_fparam = True else: @@ -140,7 +140,7 @@ def __init__( self.t_fparam = None self.has_fparam = False - if ("%s/t_aparam" % load_prefix) in operations: + if (f"{load_prefix}/t_aparam") in operations: self.tensors.update({"t_aparam": "t_aparam:0"}) self.has_aparam = True else: @@ -148,7 +148,7 @@ def __init__( self.t_aparam = None self.has_aparam = False - if ("%s/spin_attr/ntypes_spin" % load_prefix) in operations: + if (f"{load_prefix}/spin_attr/ntypes_spin") in operations: self.tensors.update({"t_ntypes_spin": "spin_attr/ntypes_spin:0"}) self.has_spin = True else: diff --git a/deepmd/model/dos.py b/deepmd/model/dos.py index 22e291a0f0..76329b5798 100644 --- a/deepmd/model/dos.py +++ b/deepmd/model/dos.py @@ -228,6 +228,6 @@ def init_variables( self.fitting.init_variables(graph, graph_def, suffix=suffix) tf.constant("compressed_model", name="model_type", dtype=tf.string) else: - raise RuntimeError("Unknown model type %s" % model_type) + raise RuntimeError(f"Unknown model type {model_type}") if self.typeebd is not None: self.typeebd.init_variables(graph, graph_def, suffix=suffix) diff --git a/deepmd/model/ener.py b/deepmd/model/ener.py index 0d8d66b305..7f665fe2c3 100644 --- a/deepmd/model/ener.py +++ b/deepmd/model/ener.py @@ -375,7 +375,7 @@ def init_variables( self.fitting.init_variables(graph, graph_def, suffix=suffix) tf.constant("compressed_model", name="model_type", dtype=tf.string) else: - raise RuntimeError("Unknown model type %s" % model_type) + raise RuntimeError(f"Unknown model type {model_type}") if ( self.typeebd is not None and self.typeebd.type_embedding_net_variables is None diff --git a/deepmd/model/model.py b/deepmd/model/model.py index 6117b4942d..3f2fde415f 100644 --- a/deepmd/model/model.py +++ b/deepmd/model/model.py @@ -301,11 +301,11 @@ def build_descrpt( else: tf.constant( self.rcut, - name="descrpt_attr%s/rcut" % suffix, + name=f"descrpt_attr{suffix}/rcut", dtype=GLOBAL_TF_FLOAT_PRECISION, ) tf.constant( - self.ntypes, name="descrpt_attr%s/ntypes" % suffix, dtype=tf.int32 + self.ntypes, name=f"descrpt_attr{suffix}/ntypes", dtype=tf.int32 ) if "global_feed_dict" in input_dict: feed_dict = input_dict["global_feed_dict"] @@ -320,7 +320,7 @@ def build_descrpt( ) return_elements = [ *self.descrpt.get_tensor_names(suffix=suffix), - "o_descriptor%s:0" % suffix, + f"o_descriptor{suffix}:0", ] if frz_model is not None: imported_tensors = self._import_graph_def_from_frz_model( diff --git a/deepmd/model/tensor.py b/deepmd/model/tensor.py index 6a21e085f3..9564e9b903 100644 --- a/deepmd/model/tensor.py +++ b/deepmd/model/tensor.py @@ -234,7 +234,7 @@ def init_variables( self.fitting.init_variables(graph, graph_def, suffix=suffix) tf.constant("compressed_model", name="model_type", dtype=tf.string) else: - raise RuntimeError("Unknown model type %s" % model_type) + raise RuntimeError(f"Unknown model type 
{model_type}") class WFCModel(TensorModel): diff --git a/deepmd/nvnmd/entrypoints/train.py b/deepmd/nvnmd/entrypoints/train.py index 6e14b6f865..439ebaed0a 100644 --- a/deepmd/nvnmd/entrypoints/train.py +++ b/deepmd/nvnmd/entrypoints/train.py @@ -126,7 +126,7 @@ def train_nvnmd( ): # test input if not os.path.exists(INPUT): - log.warning("The input script %s does not exist" % (INPUT)) + log.warning(f"The input script {INPUT} does not exist") # STEP1 PATH_CNN = "nvnmd_cnn" CONFIG_CNN = os.path.join(PATH_CNN, "config.npy") diff --git a/deepmd/nvnmd/entrypoints/wrap.py b/deepmd/nvnmd/entrypoints/wrap.py index 1ba2ed7384..868afbed93 100644 --- a/deepmd/nvnmd/entrypoints/wrap.py +++ b/deepmd/nvnmd/entrypoints/wrap.py @@ -137,7 +137,7 @@ def wrap(self): # DEVELOP_DEBUG if jdata_sys["debug"]: log.info("%s: %d x % d bit" % (k, h, w * 4)) - FioTxt().save("nvnmd/wrap/h%s.txt" % (k), d) + FioTxt().save(f"nvnmd/wrap/h{k}.txt", d) datas[ii] = d # update h & w of nvnmd_cfg nvnmd_cfg.size["NH_DATA"] = nhs diff --git a/deepmd/nvnmd/utils/fio.py b/deepmd/nvnmd/utils/fio.py index 82567e3917..3efd7520dd 100644 --- a/deepmd/nvnmd/utils/fio.py +++ b/deepmd/nvnmd/utils/fio.py @@ -180,7 +180,7 @@ def save(self, file_name: str, data: List[str]): buff.extend(list(bytearray.fromhex(si))[::-1]) # with open(file_name, "wb") as fp: - fp.write(struct.pack("%sB" % len(buff), *buff)) + fp.write(struct.pack(f"{len(buff)}B", *buff)) class FioTxt: diff --git a/deepmd/train/trainer.py b/deepmd/train/trainer.py index 719250e7fb..d059aa84cc 100644 --- a/deepmd/train/trainer.py +++ b/deepmd/train/trainer.py @@ -239,7 +239,7 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix=""): ) else: log.info( - "fitting net %s training without frame parameter" % fitting_key + f"fitting net {fitting_key} training without frame parameter" ) if not self.is_compress: @@ -437,8 +437,7 @@ def _build_optimizer(self, fitting_key=None): _TF_VERSION = Version(TF_VERSION) if _TF_VERSION < Version("1.14.0"): raise RuntimeError( - "TensorFlow version %s is not compatible with the mixed precision setting. Please consider upgrading your TF version!" - % TF_VERSION + f"TensorFlow version {TF_VERSION} is not compatible with the mixed precision setting. Please consider upgrading your TF version!" 
) elif _TF_VERSION < Version("2.4.0"): optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite( @@ -502,14 +501,14 @@ def _init_session(self): fp = open(self.disp_file, "w") fp.close() elif self.run_opt.init_mode == "init_from_model": - log.info("initialize from model %s" % self.run_opt.init_model) + log.info(f"initialize from model {self.run_opt.init_model}") run_sess(self.sess, init_op) self.saver.restore(self.sess, self.run_opt.init_model) run_sess(self.sess, self.global_step.assign(0)) fp = open(self.disp_file, "w") fp.close() elif self.run_opt.init_mode == "restart": - log.info("restart from model %s" % self.run_opt.restart) + log.info(f"restart from model {self.run_opt.restart}") run_sess(self.sess, init_op) self.saver.restore(self.sess, self.run_opt.restart) elif self.run_opt.init_mode == "init_from_frz_model": @@ -844,7 +843,7 @@ def save_checkpoint(self, cur_batch: int): os.symlink(os.path.relpath(ori_ff, os.path.dirname(new_ff)), new_ff) else: shutil.copyfile(ori_ff, new_ff) - log.info("saved checkpoint %s" % self.save_ckpt) + log.info(f"saved checkpoint {self.save_ckpt}") def get_feed_dict(self, batch, is_training): feed_dict = {} @@ -970,7 +969,7 @@ def print_on_training( prop_fmt = " %11.2e" for k in train_results.keys(): print_str += prop_fmt % (train_results[k]) - print_str += " %8.1e\n" % cur_lr + print_str += f" {cur_lr:8.1e}\n" else: for fitting_key in train_results: if valid_results[fitting_key] is not None: @@ -985,7 +984,7 @@ def print_on_training( prop_fmt = " %11.2e" for k in train_results[fitting_key].keys(): print_str += prop_fmt % (train_results[fitting_key][k]) - print_str += " %8.1e\n" % cur_lr_dict[fitting_key] + print_str += f" {cur_lr_dict[fitting_key]:8.1e}\n" fp.write(print_str) fp.flush() @@ -1063,9 +1062,8 @@ def _init_from_frz_model(self): except GraphWithoutTensorError as e: # throw runtime error if the frozen_model has no model type information... raise RuntimeError( - "The input frozen model: %s has no 'model_type' information, " + f"The input frozen model: {self.run_opt.init_frz_model} has no 'model_type' information, " "which is not supported by the 'dp train init-frz-model' interface. " - % self.run_opt.init_frz_model ) from e else: self.model_type = bytes.decode(t_model_type) @@ -1117,9 +1115,8 @@ def _init_from_pretrained_model( except GraphWithoutTensorError as e: # throw runtime error if the frozen_model has no model type information... raise RuntimeError( - "The input frozen pretrained model: %s has no 'model_type' information, " + f"The input frozen pretrained model: {self.run_opt.finetune} has no 'model_type' information, " "which is not supported by the 'dp train finetune' interface. " - % self.run_opt.finetune ) from e else: self.model_type = bytes.decode(t_model_type) diff --git a/deepmd/utils/convert.py b/deepmd/utils/convert.py index 13e07f0885..ff90b596d2 100644 --- a/deepmd/utils/convert.py +++ b/deepmd/utils/convert.py @@ -77,8 +77,7 @@ def convert_to_21(input_model: str, output_model: str, version: Optional[str] = convert_pb_to_pbtxt(input_model, "frozen_model.pbtxt") if version is None: raise ValueError( - "The version of the DP graph %s cannot be detected. Please do the conversion manually." - % (input_model) + f"The version of the DP graph {input_model} cannot be detected. Please do the conversion manually." 
) if version in SpecifierSet("<1.0"): convert_dp012_to_dp10("frozen_model.pbtxt") diff --git a/deepmd/utils/finetune.py b/deepmd/utils/finetune.py index 922d131b7e..84b0da962f 100644 --- a/deepmd/utils/finetune.py +++ b/deepmd/utils/finetune.py @@ -33,10 +33,9 @@ def replace_model_params_with_pretrained_model( t_jdata = get_tensor_by_name(pretrained_model, "train_attr/training_script") except GraphWithoutTensorError as e: raise RuntimeError( - "The input frozen pretrained model: %s has no training script, " + f"The input frozen pretrained model: {input} has no training script, " "which is not supported to perform finetuning. " "Please use the model pretrained with v2.1.5 or higher version of DeePMD-kit." - % input ) from e pretrained_jdata = json.loads(t_jdata) diff --git a/deepmd/utils/multi_init.py b/deepmd/utils/multi_init.py index d29a42cf0f..83de69e3a0 100644 --- a/deepmd/utils/multi_init.py +++ b/deepmd/utils/multi_init.py @@ -33,10 +33,9 @@ def replace_model_params_with_frz_multi_model( t_jdata = get_tensor_by_name(pretrained_model, "train_attr/training_script") except GraphWithoutTensorError as e: raise RuntimeError( - "The input frozen pretrained model: %s has no training script, " + f"The input frozen pretrained model: {input} has no training script, " "which is not supported to perform multi-task training. " "Please use the model pretrained with v2.1.5 or higher version of DeePMD-kit." - % input ) from e pretrained_jdata = json.loads(t_jdata) diff --git a/deepmd/utils/neighbor_stat.py b/deepmd/utils/neighbor_stat.py index 982ead3549..51c8c99658 100644 --- a/deepmd/utils/neighbor_stat.py +++ b/deepmd/utils/neighbor_stat.py @@ -210,16 +210,15 @@ def get_stat(self, data: DeepmdDataSystem) -> Tuple[float, np.ndarray]: for mn, dt, jj in self.iterator(data): if np.isinf(dt): log.warning( - "Atoms with no neighbors found in %s. Please make sure it's what you expected." - % jj + f"Atoms with no neighbors found in {jj}. Please make sure it's what you expected." ) if dt < min_nbor_dist: if math.isclose(dt, 0.0, rel_tol=1e-6): # it's unexpected that the distance between two atoms is zero # zero distance will cause nan (#874) raise RuntimeError( - "Some atoms are overlapping in %s. Please check your" - " training data to remove duplicated atoms." % jj + f"Some atoms are overlapping in {jj}. Please check your" + " training data to remove duplicated atoms." ) min_nbor_dist = dt max_nbor_size = np.maximum(mn, max_nbor_size) diff --git a/deepmd_utils/entrypoints/doc.py b/deepmd_utils/entrypoints/doc.py index 2c2c5e8451..457796c026 100644 --- a/deepmd_utils/entrypoints/doc.py +++ b/deepmd_utils/entrypoints/doc.py @@ -16,5 +16,5 @@ def doc_train_input(*, out_type: str = "rst", **kwargs): elif out_type == "json": doc_str = gen_json() else: - raise RuntimeError("Unsupported out type %s" % out_type) + raise RuntimeError(f"Unsupported out type {out_type}") print(doc_str) # noqa: T201 diff --git a/deepmd_utils/env.py b/deepmd_utils/env.py index b1d4958ed8..75a3ecf288 100644 --- a/deepmd_utils/env.py +++ b/deepmd_utils/env.py @@ -22,7 +22,7 @@ global_float_prec = "float" else: raise RuntimeError( - "Unsupported float precision option: %s. Supported: high," + f"Unsupported float precision option: {dp_float_prec}. Supported: high," "low. Please set precision with environmental variable " - "DP_INTERFACE_PREC." % dp_float_prec + "DP_INTERFACE_PREC." 
) diff --git a/deepmd_utils/main.py b/deepmd_utils/main.py index ae4852acc9..82dacdbac2 100644 --- a/deepmd_utils/main.py +++ b/deepmd_utils/main.py @@ -544,7 +544,7 @@ def main_parser() -> argparse.ArgumentParser: # --version parser.add_argument( - "--version", action="version", version="DeePMD-kit v%s" % __version__ + "--version", action="version", version=f"DeePMD-kit v{__version__}" ) # * train nvnmd script ****************************************************************** diff --git a/deepmd_utils/utils/data.py b/deepmd_utils/utils/data.py index 534216dfaf..6494d25675 100644 --- a/deepmd_utils/utils/data.py +++ b/deepmd_utils/utils/data.py @@ -359,7 +359,7 @@ def get_natoms_vec(self, ntypes: int): def avg(self, key): """Return the average value of an item.""" if key not in self.data_dict.keys(): - raise RuntimeError("key %s has not been added" % key) + raise RuntimeError(f"key {key} has not been added") info = self.data_dict[key] ndof = info["ndof"] eners = [] @@ -568,7 +568,7 @@ def _load_data( data = np.repeat(data, repeat).reshape([nframes, -1]) return np.float32(1.0), data elif must: - raise RuntimeError("%s not found!" % path) + raise RuntimeError(f"{path} not found!") else: data = np.full([nframes, ndof], default, dtype=dtype) if repeat != 1: diff --git a/deepmd_utils/utils/data_system.py b/deepmd_utils/utils/data_system.py index f83f587590..a2abca85db 100644 --- a/deepmd_utils/utils/data_system.py +++ b/deepmd_utils/utils/data_system.py @@ -559,7 +559,7 @@ def print_summary(self, name): ) log.info("found %d system(s):" % self.nsystems) log.info( - ("%s " % self._format_name_length("system", sys_width)) + ("{} ".format(self._format_name_length("system", sys_width))) + ("%6s %6s %6s %9s %3s" % ("natoms", "bch_sz", "n_bch", "prob", "pbc")) ) for ii in range(self.nsystems): diff --git a/deepmd_utils/utils/path.py b/deepmd_utils/utils/path.py index a8e4bc329f..751ee3731e 100644 --- a/deepmd_utils/utils/path.py +++ b/deepmd_utils/utils/path.py @@ -39,7 +39,7 @@ def __new__(cls, path: str): # assume h5 if it is not dir # TODO: check if it is a real h5? or just check suffix? return super().__new__(DPH5Path) - raise FileNotFoundError("%s not found" % path) + raise FileNotFoundError(f"{path} not found") return super().__new__(cls) @abstractmethod diff --git a/doc/conf.py b/doc/conf.py index fae850a132..09dc43fa68 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -174,7 +174,7 @@ def setup(app): import typing for typing_type in typing.__all__: - numpydoc_xref_aliases[typing_type] = "typing.%s" % typing_type + numpydoc_xref_aliases[typing_type] = f"typing.{typing_type}" rst_epilog = f""" .. |ACTIVATION_FN| replace:: {list_to_doc(ACTIVATION_FN_DICT.keys())} diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index f348a3dbe9..059612f7af 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -25,9 +25,9 @@ namespace hpp { **/ struct deepmd_exception : public std::runtime_error { public: - deepmd_exception() : runtime_error("DeePMD-kit C API Error!"){}; + deepmd_exception() : runtime_error("DeePMD-kit C API Error!") {}; deepmd_exception(const std::string &msg) - : runtime_error(std::string("DeePMD-kit C API Error: ") + msg){}; + : runtime_error(std::string("DeePMD-kit C API Error: ") + msg) {}; }; } // namespace hpp } // namespace deepmd @@ -620,7 +620,7 @@ class DeepPot { /** * @brief DP constructor without initialization. 
**/ - DeepPot() : dp(nullptr){}; + DeepPot() : dp(nullptr) {}; ~DeepPot() { DP_DeleteDeepPot(dp); }; /** * @brief DP constructor with initialization. @@ -1160,7 +1160,7 @@ class DeepPotModelDevi { /** * @brief DP model deviation constructor without initialization. **/ - DeepPotModelDevi() : dp(nullptr){}; + DeepPotModelDevi() : dp(nullptr) {}; ~DeepPotModelDevi() { DP_DeleteDeepPotModelDevi(dp); }; /** * @brief DP model deviation constructor with initialization. @@ -1785,7 +1785,7 @@ class DeepTensor { /** * @brief Deep Tensor constructor without initialization. **/ - DeepTensor() : dt(nullptr){}; + DeepTensor() : dt(nullptr) {}; ~DeepTensor() { DP_DeleteDeepTensor(dt); }; /** * @brief DeepTensor constructor with initialization. @@ -2161,7 +2161,7 @@ class DipoleChargeModifier { /** * @brief DipoleChargeModifier constructor without initialization. **/ - DipoleChargeModifier() : dcm(nullptr){}; + DipoleChargeModifier() : dcm(nullptr) {}; ~DipoleChargeModifier() { DP_DeleteDipoleChargeModifier(dcm); }; /** * @brief DipoleChargeModifier constructor with initialization. diff --git a/source/api_c/tests/test_deeppot_a_hpp.cc b/source/api_c/tests/test_deeppot_a_hpp.cc index 814122d538..388b53fdfd 100644 --- a/source/api_c/tests/test_deeppot_a_hpp.cc +++ b/source/api_c/tests/test_deeppot_a_hpp.cc @@ -140,7 +140,7 @@ TYPED_TEST(TestInferDeepPotAHPP, cpu_build_nlist_numfv) { public: MyModel(deepmd::hpp::DeepPot& dp_, const std::vector& atype_) - : mydp(dp_), atype(atype_){}; + : mydp(dp_), atype(atype_) {}; virtual void compute(double& ener, std::vector& force, std::vector& virial, diff --git a/source/api_cc/include/DataModifier.h b/source/api_cc/include/DataModifier.h index 0f46b5e0f8..9933f034a6 100644 --- a/source/api_cc/include/DataModifier.h +++ b/source/api_cc/include/DataModifier.h @@ -14,7 +14,7 @@ class DipoleChargeModifierBase { /** * @brief Dipole charge modifier without initialization. **/ - DipoleChargeModifierBase(){}; + DipoleChargeModifierBase() {}; /** * @brief Dipole charge modifier without initialization. * @param[in] model The name of the frozen model file. @@ -24,7 +24,7 @@ class DipoleChargeModifierBase { DipoleChargeModifierBase(const std::string& model, const int& gpu_rank = 0, const std::string& name_scope = ""); - virtual ~DipoleChargeModifierBase(){}; + virtual ~DipoleChargeModifierBase() {}; /** * @brief Initialize the dipole charge modifier. * @param[in] model The name of the frozen model file. diff --git a/source/api_cc/include/DeepPot.h b/source/api_cc/include/DeepPot.h index fa35d4b13a..3da5c34184 100644 --- a/source/api_cc/include/DeepPot.h +++ b/source/api_cc/include/DeepPot.h @@ -15,8 +15,8 @@ class DeepPotBase { /** * @brief DP constructor without initialization. **/ - DeepPotBase(){}; - virtual ~DeepPotBase(){}; + DeepPotBase() {}; + virtual ~DeepPotBase() {}; /** * @brief DP constructor with initialization. * @param[in] model The name of the frozen model file. diff --git a/source/api_cc/include/DeepTensor.h b/source/api_cc/include/DeepTensor.h index 5592942d87..f355413d80 100644 --- a/source/api_cc/include/DeepTensor.h +++ b/source/api_cc/include/DeepTensor.h @@ -15,8 +15,8 @@ class DeepTensorBase { /** * @brief Deep Tensor constructor without initialization. **/ - DeepTensorBase(){}; - virtual ~DeepTensorBase(){}; + DeepTensorBase() {}; + virtual ~DeepTensorBase() {}; /** * @brief Deep Tensor constructor with initialization.. * @param[in] model The name of the frozen model file. 
diff --git a/source/api_cc/include/common.h b/source/api_cc/include/common.h index 7982c4f89d..ede34b68c9 100644 --- a/source/api_cc/include/common.h +++ b/source/api_cc/include/common.h @@ -170,9 +170,9 @@ void load_op_library(); **/ struct tf_exception : public deepmd::deepmd_exception { public: - tf_exception() : deepmd::deepmd_exception("TensorFlow Error!"){}; + tf_exception() : deepmd::deepmd_exception("TensorFlow Error!") {}; tf_exception(const std::string& msg) - : deepmd::deepmd_exception(std::string("TensorFlow Error: ") + msg){}; + : deepmd::deepmd_exception(std::string("TensorFlow Error: ") + msg) {}; }; /** diff --git a/source/api_cc/src/DataModifier.cc b/source/api_cc/src/DataModifier.cc index d44d552bb2..e9aee65de2 100644 --- a/source/api_cc/src/DataModifier.cc +++ b/source/api_cc/src/DataModifier.cc @@ -15,7 +15,7 @@ DipoleChargeModifier::DipoleChargeModifier(const std::string& model, init(model, gpu_rank, name_scope_); } -DipoleChargeModifier::~DipoleChargeModifier(){}; +DipoleChargeModifier::~DipoleChargeModifier() {}; void DipoleChargeModifier::init(const std::string& model, const int& gpu_rank, diff --git a/source/api_cc/tests/test_deeppot_a.cc b/source/api_cc/tests/test_deeppot_a.cc index 067b2ff524..ae1384e8a3 100644 --- a/source/api_cc/tests/test_deeppot_a.cc +++ b/source/api_cc/tests/test_deeppot_a.cc @@ -143,7 +143,7 @@ TYPED_TEST(TestInferDeepPotA, cpu_build_nlist_numfv) { public: MyModel(deepmd::DeepPot& dp_, const std::vector& atype_) - : mydp(dp_), atype(atype_){}; + : mydp(dp_), atype(atype_) {}; virtual void compute(double& ener, std::vector& force, std::vector& virial, diff --git a/source/api_cc/tests/test_deeppot_r.cc b/source/api_cc/tests/test_deeppot_r.cc index 8eec36522e..86fa4b091f 100644 --- a/source/api_cc/tests/test_deeppot_r.cc +++ b/source/api_cc/tests/test_deeppot_r.cc @@ -143,7 +143,7 @@ TYPED_TEST(TestInferDeepPotR, cpu_build_nlist_numfv) { public: MyModel(deepmd::DeepPot& dp_, const std::vector& atype_) - : mydp(dp_), atype(atype_){}; + : mydp(dp_), atype(atype_) {}; virtual void compute(double& ener, std::vector& force, std::vector& virial, diff --git a/source/install/build_tf.py b/source/install/build_tf.py index 15847d2c21..13f773ce2c 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -192,7 +192,7 @@ def path(self) -> Path: @property def gzip_path(self) -> Path: if self.gzip is None: - raise RuntimeError("gzip is None for %s" % self.path) + raise RuntimeError(f"gzip is None for {self.path}") return PACKAGE_DIR / self.gzip @property @@ -257,10 +257,9 @@ def __call__(self): dd() else: dlog.info( - "Skip installing %s, which has been already installed" - % dd.__class__.__name__ + f"Skip installing {dd.__class__.__name__}, which has been already installed" ) - dlog.info("Start installing %s..." 
% self.__class__.__name__) + dlog.info(f"Start installing {self.__class__.__name__}...") with tempfile.TemporaryDirectory() as tmpdirname: self._prefix = Path(tmpdirname) self.build() @@ -621,7 +620,7 @@ def dependencies(self) -> Dict[str, Build]: def build(self): tf_res = self.resources["tensorflow"] - src = tf_res.gzip_path / ("tensorflow-%s" % self.version) + src = tf_res.gzip_path / (f"tensorflow-{self.version}") with set_directory(src): # configure -- need bazelisk in PATH call( diff --git a/source/lib/include/errors.h b/source/lib/include/errors.h index 6687d2afa2..d0de379537 100644 --- a/source/lib/include/errors.h +++ b/source/lib/include/errors.h @@ -10,15 +10,15 @@ namespace deepmd { **/ struct deepmd_exception : public std::runtime_error { public: - deepmd_exception() : runtime_error("DeePMD-kit Error!"){}; + deepmd_exception() : runtime_error("DeePMD-kit Error!") {}; deepmd_exception(const std::string& msg) - : runtime_error(std::string("DeePMD-kit Error: ") + msg){}; + : runtime_error(std::string("DeePMD-kit Error: ") + msg) {}; }; struct deepmd_exception_oom : public deepmd_exception { public: - deepmd_exception_oom() : deepmd_exception("DeePMD-kit OOM!"){}; + deepmd_exception_oom() : deepmd_exception("DeePMD-kit OOM!") {}; deepmd_exception_oom(const std::string& msg) - : deepmd_exception(std::string("DeePMD-kit OOM: ") + msg){}; + : deepmd_exception(std::string("DeePMD-kit OOM: ") + msg) {}; }; }; // namespace deepmd diff --git a/source/lib/include/neighbor_list.h b/source/lib/include/neighbor_list.h index eb510eb25b..39c2064b56 100644 --- a/source/lib/include/neighbor_list.h +++ b/source/lib/include/neighbor_list.h @@ -26,13 +26,13 @@ struct InputNlist { int* numneigh; /// Array stores the core region atom's neighbor index int** firstneigh; - InputNlist() : inum(0), ilist(NULL), numneigh(NULL), firstneigh(NULL){}; + InputNlist() : inum(0), ilist(NULL), numneigh(NULL), firstneigh(NULL) {}; InputNlist(int inum_, int* ilist_, int* numneigh_, int** firstneigh_) : inum(inum_), ilist(ilist_), numneigh(numneigh_), - firstneigh(firstneigh_){}; - ~InputNlist(){}; + firstneigh(firstneigh_) {}; + ~InputNlist() {}; }; /** diff --git a/source/lmp/pppm_dplr.h b/source/lmp/pppm_dplr.h index e87fc6a241..1484a16e72 100644 --- a/source/lmp/pppm_dplr.h +++ b/source/lmp/pppm_dplr.h @@ -25,7 +25,7 @@ class PPPMDPLR : public PPPM { #else PPPMDPLR(class LAMMPS *); #endif - ~PPPMDPLR() override{}; + ~PPPMDPLR() override {}; void init() override; const std::vector &get_fele() const { return fele; }; diff --git a/source/md/include/Integrator.h b/source/md/include/Integrator.h index 73e84e7097..945109fb20 100644 --- a/source/md/include/Integrator.h +++ b/source/md/include/Integrator.h @@ -9,7 +9,7 @@ template class Integrator { public: - Integrator() : massConst(UnitManager::IntegratorMassConstant){}; + Integrator() : massConst(UnitManager::IntegratorMassConstant) {}; public: void stepVeloc(std::vector& vv, diff --git a/source/md/include/Tabulated.h b/source/md/include/Tabulated.h index 5ab6e02bc3..4c618db213 100644 --- a/source/md/include/Tabulated.h +++ b/source/md/include/Tabulated.h @@ -13,7 +13,7 @@ typedef float VALUETYPE; class Tabulated { public: - Tabulated(){}; + Tabulated() {}; Tabulated(const VALUETYPE rc, const VALUETYPE hh, const std::vector& tab); diff --git a/source/md/include/Trajectory.h b/source/md/include/Trajectory.h index 862b393ea4..52964f3b37 100644 --- a/source/md/include/Trajectory.h +++ b/source/md/include/Trajectory.h @@ -11,7 +11,7 @@ class XtcSaver { public: - XtcSaver() 
: inited(false), prec(1000){}; + XtcSaver() : inited(false), prec(1000) {}; ~XtcSaver(); XtcSaver(const char *filename, const int &natoms); bool reinit(const char *filename, const int &natoms); @@ -33,7 +33,7 @@ class XtcSaver { class TrrSaver { public: - TrrSaver() : inited(false), lambda(0){}; + TrrSaver() : inited(false), lambda(0) {}; ~TrrSaver(); TrrSaver(const char *filename, const int &natoms); bool reinit(const char *filename, const int &natoms); diff --git a/source/md/include/UnitManager.h b/source/md/include/UnitManager.h index 70393c406e..9bd1e75b9a 100644 --- a/source/md/include/UnitManager.h +++ b/source/md/include/UnitManager.h @@ -5,7 +5,7 @@ class UnitManager { protected: - UnitManager(){}; + UnitManager() {}; public: static double Degree2Radian; diff --git a/source/op/add_flt_nvnmd.cc b/source/op/add_flt_nvnmd.cc index 079e05d785..db3554665a 100644 --- a/source/op/add_flt_nvnmd.cc +++ b/source/op/add_flt_nvnmd.cc @@ -52,7 +52,7 @@ template class AddFltNvnmdOp : public OpKernel { public: /// Constructor. - explicit AddFltNvnmdOp(OpKernelConstruction* context) : OpKernel(context){}; + explicit AddFltNvnmdOp(OpKernelConstruction* context) : OpKernel(context) {}; /// Compute the descriptor /// param: context diff --git a/source/op/copy_flt_nvnmd.cc b/source/op/copy_flt_nvnmd.cc index d9a1984115..3ca9b17c31 100644 --- a/source/op/copy_flt_nvnmd.cc +++ b/source/op/copy_flt_nvnmd.cc @@ -50,7 +50,7 @@ template class CopyFltNvnmdOp : public OpKernel { public: /// Constructor. - explicit CopyFltNvnmdOp(OpKernelConstruction* context) : OpKernel(context){}; + explicit CopyFltNvnmdOp(OpKernelConstruction* context) : OpKernel(context) {}; /// Compute the descriptor /// param: context diff --git a/source/op/dotmul_flt_nvnmd.cc b/source/op/dotmul_flt_nvnmd.cc index d7c2c8d3c3..1aca3e8bf8 100644 --- a/source/op/dotmul_flt_nvnmd.cc +++ b/source/op/dotmul_flt_nvnmd.cc @@ -61,7 +61,7 @@ class DotmulFltNvnmdOp : public OpKernel { public: /// Constructor. explicit DotmulFltNvnmdOp(OpKernelConstruction *context) - : OpKernel(context){}; + : OpKernel(context) {}; /// Compute the descriptor /// param: context diff --git a/source/op/flt_nvnmd.cc b/source/op/flt_nvnmd.cc index 634c76b662..4794caeedc 100644 --- a/source/op/flt_nvnmd.cc +++ b/source/op/flt_nvnmd.cc @@ -48,7 +48,7 @@ template class FltNvnmdOp : public OpKernel { public: /// Constructor. - explicit FltNvnmdOp(OpKernelConstruction* context) : OpKernel(context){}; + explicit FltNvnmdOp(OpKernelConstruction* context) : OpKernel(context) {}; /// Compute the descriptor /// param: context diff --git a/source/op/mul_flt_nvnmd.cc b/source/op/mul_flt_nvnmd.cc index d18cf20508..bbbeec073d 100644 --- a/source/op/mul_flt_nvnmd.cc +++ b/source/op/mul_flt_nvnmd.cc @@ -52,7 +52,7 @@ template class MulFltNvnmdOp : public OpKernel { public: /// Constructor. 
- explicit MulFltNvnmdOp(OpKernelConstruction* context) : OpKernel(context){}; + explicit MulFltNvnmdOp(OpKernelConstruction* context) : OpKernel(context) {}; /// Compute the descriptor /// param: context diff --git a/source/tests/common.py b/source/tests/common.py index 5a7477d7e7..d9b300f640 100644 --- a/source/tests/common.py +++ b/source/tests/common.py @@ -663,8 +663,7 @@ def __init__(self, sys_path, set_prefix, seed=None, shuffle_test=True): ] if any(has_fparam) and (not all(has_fparam)): raise RuntimeError( - "system %s: if any set has frame parameter, then all sets should have frame parameter" - % sys_path + f"system {sys_path}: if any set has frame parameter, then all sets should have frame parameter" ) if all(has_fparam): self.has_fparam = 0 @@ -676,8 +675,7 @@ def __init__(self, sys_path, set_prefix, seed=None, shuffle_test=True): ] if any(has_aparam) and (not all(has_aparam)): raise RuntimeError( - "system %s: if any set has frame parameter, then all sets should have frame parameter" - % sys_path + f"system {sys_path}: if any set has frame parameter, then all sets should have frame parameter" ) if all(has_aparam): self.has_aparam = 0 @@ -767,7 +765,7 @@ def load_data(self, set_name, data_name, shape, is_necessary=True): return data return 1, data elif is_necessary: - raise OSError("%s not found!" % path) + raise OSError(f"{path} not found!") else: data = np.zeros(shape) return 0, data @@ -1021,7 +1019,7 @@ def print_summary(self): sys_width = 42 tmp_msg += "---Summary of DataSystem-----------------------------------------\n" tmp_msg += "find %d system(s):\n" % self.nsystems - tmp_msg += "%s " % self.format_name_length("system", sys_width) + tmp_msg += "{} ".format(self.format_name_length("system", sys_width)) tmp_msg += "{} {} {}\n".format("natoms", "bch_sz", "n_bch") for ii in range(self.nsystems): tmp_msg += "%s %6d %6d %5d\n" % ( From 85dff3b1af3d5a0221120b671814cddd645b78d9 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 23 May 2024 09:53:28 -0400 Subject: [PATCH 22/30] docs: update DPA-1 reference (#3810) This replaces the BibTeX reference to the preprint with the reference to the published version (generated by https://github.com/njzjz/wenxian/issues/23#issuecomment-2125874043). ## Summary by CodeRabbit - **Documentation** - Updated citation information for an article by Zhang et al. to reflect the latest publication details. - Added Xinzijian Liu to the author list. - Updated title, journal, year, volume, issue, pages, and DOI in the citation. - Revised citation reference in the credits documentation.
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit 55a1cf672cc95e99fe0ad6d209665c6d17297453) --- CITATIONS.bib | 21 ++++++++++++--------- doc/credits.rst | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/CITATIONS.bib b/CITATIONS.bib index a456e16bf4..a05e7ef9ee 100644 --- a/CITATIONS.bib +++ b/CITATIONS.bib @@ -110,19 +110,22 @@ @article{Wang_NuclFusion_2022_v62_p126013 doi = {10.1088/1741-4326/ac888b}, } -@misc{Zhang_2022_DPA1, - annote = {attention-based descriptor}, +@article{Zhang_NpjComputMater_2024_v10_p94, + annote = {DPA-1, attention-based descriptor}, author = { - Zhang, Duo and Bi, Hangrui and Dai, Fu-Zhi and Jiang, Wanrun and Zhang, - Linfeng and Wang, Han + Duo Zhang and Hangrui Bi and Fu-Zhi Dai and Wanrun Jiang and Xinzijian Liu + and Linfeng Zhang and Han Wang }, title = { - {DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular - Simulation} + {Pretraining of attention-based deep learning potential model for molecular + simulation} }, - publisher = {arXiv}, - year = 2022, - doi = {10.48550/arXiv.2208.08236}, + journal = {Npj Comput. Mater}, + year = 2024, + volume = 10, + issue = 1, + pages = 94, + doi = {10.1038/s41524-024-01278-7}, } @article{Zhang_PhysPlasmas_2020_v27_p122704, diff --git a/doc/credits.rst b/doc/credits.rst index 3fbe1d56d8..d60279614b 100644 --- a/doc/credits.rst +++ b/doc/credits.rst @@ -47,7 +47,7 @@ Cite DeePMD-kit and methods .. bibliography:: :filter: False - Zhang_2022_DPA1 + Zhang_NpjComputMater_2024_v10_p94 - If frame-specific parameters (`fparam`, e.g. electronic temperature) is used, From 709abf20df0d9c782fde0f18936ae519fd93ebdb Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 18 Apr 2024 22:39:01 -0400 Subject: [PATCH 23/30] docs: setup uv for readthedocs (#3685) Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit e71e165dee6dd5d38d75562d420177844b322ad6) --- .readthedocs.yml | 9 ++++++--- doc/requirements.txt | 5 ++++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 6f3ff6be3f..e8b81a4109 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -2,7 +2,10 @@ version: 2 build: os: ubuntu-20.04 tools: - python: mambaforge-4.10 -conda: - environment: doc/environment.yml + python: "3.11" + jobs: + post_create_environment: + - pip install uv + post_install: + - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH uv pip install -r doc/requirements.txt formats: all diff --git a/doc/requirements.txt b/doc/requirements.txt index 1d39662bb4..b011900147 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1 +1,4 @@ -.[docs,cpu] +.[docs,cpu,torch] +exhale @ https://github.com/svenevs/exhale/archive/2759a394268307b88f5440487ae0920ee4ebf81e.zip +# https://github.com/mcmtroffaes/sphinxcontrib-bibtex/issues/309 +docutils!=0.18.*,!=0.19.* From 630753c3c6e140a9bf89ca2591c91b8eb116435f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 16 Apr 2024 04:23:58 -0400 Subject: [PATCH 24/30] CI: Accelerate GitHub Actions using uv (#3676) Setup [`uv`](https://github.com/astral-sh/uv) in the GitHub Actions, saving several minutes compared to pip.
pip: ![image](https://github.com/deepmodeling/deepmd-kit/assets/9496702/547adb02-1bc2-47fb-953d-24d38e3e986d) uv: ![image](https://github.com/deepmodeling/deepmd-kit/assets/9496702/6ec6536b-5dcf-44c6-a4b6-c78d08b9c4f8) Using `uv` has some limitations, but it's good to use it in the CI. --------- Signed-off-by: Jinzhe Zeng (cherry picked from commit 219b19ed46d63092dc8434cf4885135b0813da6e) Signed-off-by: Jinzhe Zeng --- .github/workflows/build_cc.yml | 4 ++-- .github/workflows/test_cc.yml | 7 ++++--- .github/workflows/test_cuda.yml | 8 +++----- .github/workflows/test_python.yml | 12 +++++++----- source/install/docker/Dockerfile | 6 ++++-- 5 files changed, 20 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build_cc.yml b/.github/workflows/build_cc.yml index f029517d80..9fd0a06337 100644 --- a/.github/workflows/build_cc.yml +++ b/.github/workflows/build_cc.yml @@ -24,9 +24,9 @@ jobs: - uses: actions/setup-python@v5 with: python-version: '3.11' - cache: 'pip' - uses: lukka/get-cmake@latest - - run: python -m pip install tensorflow + - run: python -m pip install uv + - run: python -m uv pip install --system tensorflow - run: | wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \ && sudo dpkg -i cuda-keyring_1.0-1_all.deb \ diff --git a/.github/workflows/test_cc.yml b/.github/workflows/test_cc.yml index 4a2ba7968a..500cb15e3a 100644 --- a/.github/workflows/test_cc.yml +++ b/.github/workflows/test_cc.yml @@ -20,7 +20,8 @@ jobs: with: mpi: mpich - uses: lukka/get-cmake@latest - - run: python -m pip install tensorflow + - run: python -m pip install uv + - run: python -m uv pip install --system tensorflow # https://github.com/actions/runner-images/issues/9491 - name: Fix kernel mmap rnd bits run: sudo sysctl vm.mmap_rnd_bits=28 @@ -37,8 +38,8 @@ jobs: # ASE issue: https://gitlab.com/ase/ase/-/merge_requests/2843 # TODO: remove ase version when ase has new release - run: | - python -m pip install -U pip - python -m pip install -e .[cpu,test,lmp] mpi4py "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" + export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') + python -m uv pip install --system -e .[cpu,test,lmp] mpi4py "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" env: DP_BUILD_TESTING: 1 if: ${{ !matrix.check_memleak }} diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 6435789933..d335dd0833 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -33,11 +33,9 @@ jobs: && sudo apt-get update \ && sudo apt-get -y install cuda-12-2 libcudnn8=8.9.5.*-1+cuda12.2 if: false # skip as we use nvidia image - - name: Set PyPI mirror for Aliyun cloud machine - run: python -m pip config --user set global.index-url https://mirrors.aliyun.com/pypi/simple/ - - run: python -m pip install -U "pip>=21.3.1,!=23.0.0" - - run: python -m pip install "tensorflow>=2.15.0rc0" - - run: python -m pip install -v -e .[gpu,test,lmp,cu12] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" + - run: python -m pip install -U uv + - run: python -m uv pip install --system "tensorflow>=2.15.0rc0" + - run: python -m uv pip install --system -v -e 
.[gpu,test,lmp,cu12] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" env: DP_BUILD_TESTING: 1 DP_VARIANT: cuda diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 28df7b5625..a072ad61c5 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -21,18 +21,20 @@ jobs: - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: 'pip' - uses: mpi4py/setup-mpi@v1 if: ${{ matrix.tf == '' }} with: mpi: openmpi - # https://github.com/pypa/pip/issues/11770 - - run: python -m pip install -U "pip>=21.3.1,!=23.0.0" - - run: pip install -e .[cpu,test] + - run: python -m pip install -U uv + - run: uv pip install --system -e .[cpu,test] env: + # Please note that uv has some issues with finding + # existing TensorFlow package. Currently, it uses + # TensorFlow in the build dependency, but if it + # changes, setting `TENSORFLOW_ROOT`. TENSORFLOW_VERSION: ${{ matrix.tf }} DP_BUILD_TESTING: 1 - - run: pip install horovod mpi4py + - run: uv pip install --system --no-build-isolation horovod mpi4py if: ${{ matrix.tf == '' }} env: HOROVOD_WITH_TENSORFLOW: 1 diff --git a/source/install/docker/Dockerfile b/source/install/docker/Dockerfile index 26b7be9f19..237480bfb8 100644 --- a/source/install/docker/Dockerfile +++ b/source/install/docker/Dockerfile @@ -1,12 +1,14 @@ FROM python:3.11 AS compile-image ARG VARIANT="" ARG CUDA_VERSION="12" -RUN python -m venv /opt/deepmd-kit +RUN python -m pip install uv +RUN python -m uv venv /opt/deepmd-kit # Make sure we use the virtualenv ENV PATH="/opt/deepmd-kit/bin:$PATH" +ENV VIRTUAL_ENV="/opt/deepmd-kit" # Install package COPY dist /dist -RUN pip install "$(ls /dist/deepmd_kit${VARIANT}-*manylinux*_x86_64.whl)[gpu,cu${CUDA_VERSION},lmp,ipi]" \ +RUN uv pip install "$(ls /dist/deepmd_kit${VARIANT}-*manylinux*_x86_64.whl)[gpu,cu${CUDA_VERSION},lmp,ipi]" \ && dp -h \ && lmp -h \ && dp_ipi \ From a71e0997cd6e72bfd2ef3cec4bee20e503d0e80d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 2 Jun 2024 21:26:19 -0400 Subject: [PATCH 25/30] ci: bump ase to 3.23.0 (#3846) Fix #3520. - **Chores** - Simplified the installation process by removing specific version references for the `ase` package in workflow files. - Updated the dependency version of "ase" to `>=3.23.0` in `pyproject.toml` to ensure compatibility and resolve previous issues. 
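As a quick sanity check of the new pin (an illustrative sketch only, not part of this patch; it reuses the `packaging` library that DeePMD-kit already depends on), the specifier can be evaluated programmatically:

```python
from packaging.requirements import Requirement
from packaging.version import Version

# the pin introduced by this patch
req = Requirement("ase>=3.23.0")
print(Version("3.22.1") in req.specifier)  # False: releases hit by the ASE issue are excluded
print(Version("3.23.0") in req.specifier)  # True: the first release containing the fix
```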
(cherry picked from commit f23c77ebd81c04fd6aec4622054bcd978d9b5472) Signed-off-by: Jinzhe Zeng --- .github/workflows/test_cc.yml | 4 +--- .github/workflows/test_cuda.yml | 2 +- pyproject.toml | 4 +++- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_cc.yml b/.github/workflows/test_cc.yml index 500cb15e3a..548b3e2b18 100644 --- a/.github/workflows/test_cc.yml +++ b/.github/workflows/test_cc.yml @@ -35,11 +35,9 @@ jobs: CMAKE_GENERATOR: Ninja CXXFLAGS: ${{ matrix.check_memleak && '-fsanitize=leak' || '' }} # test lammps - # ASE issue: https://gitlab.com/ase/ase/-/merge_requests/2843 - # TODO: remove ase version when ase has new release - run: | export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') - python -m uv pip install --system -e .[cpu,test,lmp] mpi4py "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" + python -m uv pip install --system -e .[cpu,test,lmp] mpi4py env: DP_BUILD_TESTING: 1 if: ${{ !matrix.check_memleak }} diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index d335dd0833..1e3f199ed3 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -35,7 +35,7 @@ jobs: if: false # skip as we use nvidia image - run: python -m pip install -U uv - run: python -m uv pip install --system "tensorflow>=2.15.0rc0" - - run: python -m uv pip install --system -v -e .[gpu,test,lmp,cu12] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" + - run: python -m uv pip install --system -v -e .[gpu,test,lmp,cu12] env: DP_BUILD_TESTING: 1 DP_VARIANT: cuda diff --git a/pyproject.toml b/pyproject.toml index 373b763146..2523dd7180 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,9 @@ repository = "https://github.com/deepmodeling/deepmd-kit" [tool.deepmd_build_backend.optional-dependencies] test = [ "dpdata>=0.2.7", - "ase", + # ASE issue: https://gitlab.com/ase/ase/-/merge_requests/2843 + # fixed in 3.23.0 + "ase>=3.23.0", "pytest", "pytest-cov", "pytest-sugar", From 128654cebed31b1e3b05e518be9a00d3b7be0d77 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 10 Jun 2024 22:57:24 -0400 Subject: [PATCH 26/30] ci(build): use uv for cibuildwheel (#3695) Should save 1 minute per build on average. ## Summary by CodeRabbit - **New Features** - Introduced platform-specific constraints for `h5py` when using TensorFlow, ensuring better compatibility and performance. - **Chores** - Updated build configuration to include the `uv` tool for building wheels. - Modified `.gitignore` to exclude the `.uv/` directory. 
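For reference (a hedged illustration, not code from this patch): the aarch64-only `h5py` constraint is a standard PEP 508 requirement with an environment marker, and its behavior can be checked with the `packaging` library:

```python
from packaging.requirements import Requirement

# the platform-specific constraint added by this patch
req = Requirement(
    "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'"
)
# evaluate() overlays the given keys on the defaults of the current environment
print(req.marker.evaluate({"platform_system": "Linux", "platform_machine": "aarch64"}))  # True
print(req.marker.evaluate({"platform_system": "Linux", "platform_machine": "x86_64"}))  # False
```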
--------- Signed-off-by: Jinzhe Zeng (cherry picked from commit a7ab1afe3a6be8080a7116d3bd1f9f603c3f14cf) --- .github/workflows/build_wheel.yml | 23 +++++++---------------- .gitignore | 1 + backend/find_tensorflow.py | 7 +++++++ pyproject.toml | 7 +++++++ 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index 273fe7dccd..e203737424 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -65,23 +65,19 @@ jobs: with: # https://github.com/pypa/setuptools_scm/issues/480 fetch-depth: 0 + - name: Install uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + if: runner.os != 'Linux' - uses: docker/setup-qemu-action@v3 name: Setup QEMU if: matrix.platform_id == 'manylinux_aarch64' && matrix.os == 'ubuntu-latest' # detect version in advance. See #3168 - - uses: actions/setup-python@v5 - name: Install Python - with: - python-version: '3.11' - cache: 'pip' - if: matrix.dp_pkg_name == 'deepmd-kit-cu11' - run: | - python -m pip install setuptools_scm - python -c "from setuptools_scm import get_version;print('SETUPTOOLS_SCM_PRETEND_VERSION='+get_version())" >> $GITHUB_ENV + echo "SETUPTOOLS_SCM_PRETEND_VERSION=$(pipx run uv tool run --from setuptools_scm python -m setuptools_scm)" >> $GITHUB_ENV rm -rf .git if: matrix.dp_pkg_name == 'deepmd-kit-cu11' - name: Build wheels - uses: pypa/cibuildwheel@v2.18 + uses: pypa/cibuildwheel@v2.19 env: CIBW_BUILD_VERBOSITY: 1 CIBW_ARCHS: all @@ -89,6 +85,7 @@ jobs: DP_VARIANT: ${{ matrix.dp_variant }} CUDA_VERSION: ${{ matrix.cuda_version }} DP_PKG_NAME: ${{ matrix.dp_pkg_name }} + CIBW_BUILD_FRONTEND: 'build[uv]' - uses: actions/upload-artifact@v4 with: name: cibw-cp${{ matrix.python }}-${{ matrix.platform_id }}-cu${{ matrix.cuda_version }}-${{ strategy.job-index }} @@ -100,14 +97,8 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-python@v5 - name: Install Python - with: - python-version: '3.11' - cache: 'pip' - - run: python -m pip install build - name: Build sdist - run: python -m build --sdist + run: pipx run uv tool run --with build[uv] --from build python -m build --installer uv --sdist - uses: actions/upload-artifact@v4 with: diff --git a/.gitignore b/.gitignore index 82d3e4a7da..572f9bf229 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,4 @@ build_cc_tests build_c_tests build_c/ libdeepmd_c/ +.uv/ diff --git a/backend/find_tensorflow.py b/backend/find_tensorflow.py index d85c3a6a18..522ac9044f 100644 --- a/backend/find_tensorflow.py +++ b/backend/find_tensorflow.py @@ -150,11 +150,18 @@ def get_tf_requirement(tf_version: str = "") -> dict: "tensorflow; platform_machine=='aarch64' or (platform_machine=='arm64' and platform_system == 'Darwin')", # https://github.com/tensorflow/tensorflow/issues/61830 "tensorflow-cpu!=2.15.*; platform_system=='Windows'", + # TODO: build(wheel): unpin h5py on aarch64 + # Revert after https://github.com/h5py/h5py/issues/2408 is fixed; + # or set UV_PREFER_BINARY when https://github.com/astral-sh/uv/issues/1794 is resolved. + # 3.6.0 is the first version to have aarch64 wheels. + "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", *extra_requires, ], "gpu": [ "tensorflow", "tensorflow-metal; platform_machine=='arm64' and platform_system == 'Darwin'", + # See above. 
+ "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", *extra_requires, ], **extra_select, diff --git a/pyproject.toml b/pyproject.toml index 2523dd7180..815530ba53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,7 @@ dependencies = [ 'typing_extensions; python_version < "3.8"', 'importlib_metadata>=1.4; python_version < "3.8"', 'h5py', + "h5py>=3.6.0,<3.11.0; platform_system=='Linux' and platform_machine=='aarch64'", 'wcmatch', 'packaging', ] @@ -224,6 +225,12 @@ before-all = [ """rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux""", """{ if [ "$(uname -m)" = "x86_64" ] ; then yum config-manager --add-repo http://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo && yum install -y cuda-nvcc-${CUDA_VERSION/./-} cuda-cudart-devel-${CUDA_VERSION/./-}; fi }""", "yum install -y mpich-devel", + # uv is not available in the old manylinux image + """{ if [ "$(uname -m)" = "x86_64" ] ; then pipx install uv; fi }""", +] +before-build = [ + # old build doesn't support uv + """{ if [ "$(uname -m)" = "x86_64" ] ; then uv pip install --system -U build; fi }""", ] [tool.cibuildwheel.windows] From bf36e267570421501c7becc92fe8097f97ab2c03 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 21 Jun 2024 02:36:47 -0400 Subject: [PATCH 27/30] chore(ci): workaround to retry `error decoding response body` from uv (#3889) This PR uses a shell wrapper to check if the `error decoding response body` error message is in the uv stderr and retry if so. It is just a workaround for https://github.com/astral-sh/uv/issues/2586 and https://github.com/astral-sh/uv/issues/3514 and hope the upstream can fix it. Note that this PR does nothing with cibuildwheel. It's unclear how to retry with certain errors under its complex logic (feature requested in https://github.com/pypa/cibuildwheel/issues/1846). - **Chores** - Standardized installation process for TensorFlow, Torch, and other dependencies across workflows by using `uv_with_retry.sh` script to ensure reliable installations. 
--------- Signed-off-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> (cherry picked from commit 359d19b24907dfbf93c9372754ec603a42702a17) Signed-off-by: Jinzhe Zeng --- .github/workflows/build_cc.yml | 2 +- .github/workflows/test_cc.yml | 4 ++-- .github/workflows/test_cuda.yml | 4 ++-- .github/workflows/test_python.yml | 4 ++-- source/install/uv_with_retry.sh | 32 +++++++++++++++++++++++++++++++ 5 files changed, 39 insertions(+), 7 deletions(-) create mode 100755 source/install/uv_with_retry.sh diff --git a/.github/workflows/build_cc.yml b/.github/workflows/build_cc.yml index 9fd0a06337..d851282cbc 100644 --- a/.github/workflows/build_cc.yml +++ b/.github/workflows/build_cc.yml @@ -26,7 +26,7 @@ jobs: python-version: '3.11' - uses: lukka/get-cmake@latest - run: python -m pip install uv - - run: python -m uv pip install --system tensorflow + - run: source/install/uv_with_retry.sh pip install --system tensorflow - run: | wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \ && sudo dpkg -i cuda-keyring_1.0-1_all.deb \ diff --git a/.github/workflows/test_cc.yml b/.github/workflows/test_cc.yml index 548b3e2b18..8525c03648 100644 --- a/.github/workflows/test_cc.yml +++ b/.github/workflows/test_cc.yml @@ -21,7 +21,7 @@ jobs: mpi: mpich - uses: lukka/get-cmake@latest - run: python -m pip install uv - - run: python -m uv pip install --system tensorflow + - run: source/install/uv_with_retry.sh pip install --system tensorflow # https://github.com/actions/runner-images/issues/9491 - name: Fix kernel mmap rnd bits run: sudo sysctl vm.mmap_rnd_bits=28 @@ -37,7 +37,7 @@ jobs: # test lammps - run: | export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') - python -m uv pip install --system -e .[cpu,test,lmp] mpi4py + source/install/uv_with_retry.sh pip install --system -e .[cpu,test,lmp] mpi4py env: DP_BUILD_TESTING: 1 if: ${{ !matrix.check_memleak }} diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 1e3f199ed3..660d022de9 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -34,8 +34,8 @@ jobs: && sudo apt-get -y install cuda-12-2 libcudnn8=8.9.5.*-1+cuda12.2 if: false # skip as we use nvidia image - run: python -m pip install -U uv - - run: python -m uv pip install --system "tensorflow>=2.15.0rc0" - - run: python -m uv pip install --system -v -e .[gpu,test,lmp,cu12] + - run: source/install/uv_with_retry.sh pip install --system "tensorflow>=2.15.0rc0" + - run: source/install/uv_with_retry.sh pip install --system -v -e .[gpu,test,lmp,cu12] env: DP_BUILD_TESTING: 1 DP_VARIANT: cuda diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index a072ad61c5..e48e00a674 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -26,7 +26,7 @@ jobs: with: mpi: openmpi - run: python -m pip install -U uv - - run: uv pip install --system -e .[cpu,test] + - run: source/install/uv_with_retry.sh pip install --system -e .[cpu,test] env: # Please note that uv has some issues with finding # existing TensorFlow package. Currently, it uses @@ -34,7 +34,7 @@ jobs: # changes, setting `TENSORFLOW_ROOT`. 
TENSORFLOW_VERSION: ${{ matrix.tf }} DP_BUILD_TESTING: 1 - - run: uv pip install --system --no-build-isolation horovod mpi4py + - run: source/install/uv_with_retry.sh pip install --system --no-build-isolation horovod mpi4py if: ${{ matrix.tf == '' }} env: HOROVOD_WITH_TENSORFLOW: 1 diff --git a/source/install/uv_with_retry.sh b/source/install/uv_with_retry.sh new file mode 100755 index 0000000000..2d9a524f6b --- /dev/null +++ b/source/install/uv_with_retry.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# This script is used to retry the uv command if the error "error decoding response body" is encountered. +# See also: +# https://github.com/astral-sh/uv/issues/2586 +# https://github.com/astral-sh/uv/issues/3456 +# https://github.com/astral-sh/uv/issues/3514 +# https://github.com/astral-sh/uv/issues/4402 +tmpstderr=$(mktemp) +max_retry=3 +while true; do + uv "$@" 2> >(tee -a "${tmpstderr}" >&2) + exit_code=$? + # exit if ok + if [ $exit_code -eq 0 ]; then + rm -f "${tmpstderr}" + exit 0 + fi + # check if "error decoding response body" is in the stderr + if grep -q "error decoding response body" "${tmpstderr}"; then + echo "Retrying uv in 1 s..." + max_retry=$((max_retry - 1)) + if [ $max_retry -eq 0 ]; then + echo "Max retry reached, exiting..." + rm -f "${tmpstderr}" + exit 1 + fi + sleep 1 + else + rm -f "${tmpstderr}" + exit $exit_code + fi +done From 1a3668149db7525a1d2647de8f2e5329cd81baea Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 2 Jul 2024 04:28:11 -0400 Subject: [PATCH 28/30] [r2] Fix uv installation with Python 3.7 Signed-off-by: Jinzhe Zeng --- .github/workflows/test_python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index e48e00a674..956c91e491 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -25,7 +25,7 @@ jobs: if: ${{ matrix.tf == '' }} with: mpi: openmpi - - run: python -m pip install -U uv + - run: curl -LsSf https://astral.sh/uv/install.sh | sh - run: source/install/uv_with_retry.sh pip install --system -e .[cpu,test] env: # Please note that uv has some issues with finding From e7f23e6687a491b34c9c3b8868b8f514501a6e48 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 2 Jul 2024 04:32:44 -0400 Subject: [PATCH 29/30] [r2] fix ase version with Python 3.7 Signed-off-by: Jinzhe Zeng --- pyproject.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 815530ba53..7a5f892b6f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,9 @@ test = [ "dpdata>=0.2.7", # ASE issue: https://gitlab.com/ase/ase/-/merge_requests/2843 # fixed in 3.23.0 - "ase>=3.23.0", + # ase>=3.23.0 requires python>=3.8; luckily, we don't test i-PI with python<3.8 + 'ase>=3.23.0;python_version>="3.8"', + 'ase;python_version<"3.8"', "pytest", "pytest-cov", "pytest-sugar", From 2b3519a2a3ec9bf719025efb0115f5601471ce03 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 2 Jul 2024 04:50:48 -0400 Subject: [PATCH 30/30] [r2] fix `test_smoothness_of_stripped_type_embedding_smooth_model_excluded_types` Signed-off-by: Jinzhe Zeng --- source/tests/test_model_se_atten.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/source/tests/test_model_se_atten.py b/source/tests/test_model_se_atten.py index 1b9dc6c1c9..cb9ff6f5ad 100644 --- a/source/tests/test_model_se_atten.py +++ b/source/tests/test_model_se_atten.py @@ -901,9 +901,7 @@ def 
test_smoothness_of_stripped_type_embedding_smooth_model_excluded_types(self) jdata["model"]["descriptor"]["exclude_types"] = [[0, 0], [0, 1]] jdata["model"]["descriptor"]["set_davg_zero"] = False descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True) - jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes() - jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out() - jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1() + jdata["model"]["fitting_net"]["descrpt"] = descrpt fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True) typeebd_param = jdata["model"]["type_embedding"] typeebd = TypeEmbedNet(