From 776a0bedfb166fa4c6e4f3a5a3888a8a86daec34 Mon Sep 17 00:00:00 2001 From: nahso <47053538+nahso@users.noreply.github.com> Date: Mon, 6 May 2024 04:28:21 +0800 Subject: [PATCH] Clarify se_atten_v2 compression doc (#3727) https://github.com/deepmodeling/deepmd-kit/issues/3643 - **Documentation** - Simplified the description for the number of attention layers in the code documentation. - Added a notice about model compression compatibility for `se_atten_v2` descriptor in the documentation. --------- Signed-off-by: Jinzhe Zeng Co-authored-by: Jinzhe Zeng (cherry picked from commit 62832e85d3c370c2ce4c66c559ee88094a544198) Signed-off-by: Jinzhe Zeng --- deepmd_utils/utils/argcheck.py | 2 +- doc/freeze/compress.md | 2 ++ doc/model/train-se-atten.md | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/deepmd_utils/utils/argcheck.py b/deepmd_utils/utils/argcheck.py index 390742edcc..29886dd54f 100644 --- a/deepmd_utils/utils/argcheck.py +++ b/deepmd_utils/utils/argcheck.py @@ -365,7 +365,7 @@ def descrpt_se_atten_common_args(): doc_seed = "Random seed for parameter initialization" doc_exclude_types = "The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1." doc_attn = "The length of hidden vectors in attention layers" - doc_attn_layer = "The number of attention layers. Note that model compression of `se_atten` is only enabled when attn_layer==0 and stripped_type_embedding is True" + doc_attn_layer = "The number of attention layers." 
doc_attn_dotr = "Whether to do dot product with the normalized relative coordinates" doc_attn_mask = "Whether to do mask on the diagonal in the attention matrix" diff --git a/doc/freeze/compress.md b/doc/freeze/compress.md index 54b9baf7ca..87d4699d2b 100644 --- a/doc/freeze/compress.md +++ b/doc/freeze/compress.md @@ -148,6 +148,8 @@ The model compression interface requires the version of DeePMD-kit used in the o Descriptors with `se_e2_a`, `se_e3`, `se_e2_r` and `se_atten_v2` types are supported by the model compression feature. `Hybrid` mixed with the above descriptors is also supported. +Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. + **Available activation functions for descriptor:** - tanh diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index 79c7149a61..0ed73fe203 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -120,6 +120,8 @@ We highly recommend using the version 2.0 of the attention-based descriptor `"se Practical evidence demonstrates that `"se_atten_v2"` offers better and more stable performance compared to `"se_atten"`. +Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0. + ### Fitting `"ener"` DPA-1 only supports `"ener"` fitting type, and you can refer [here](train-energy.md) for detailed information.