diff --git a/deepmd_utils/utils/argcheck.py b/deepmd_utils/utils/argcheck.py
index 390742edcc..29886dd54f 100644
--- a/deepmd_utils/utils/argcheck.py
+++ b/deepmd_utils/utils/argcheck.py
@@ -365,7 +365,7 @@ def descrpt_se_atten_common_args():
     doc_seed = "Random seed for parameter initialization"
     doc_exclude_types = "The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1."
     doc_attn = "The length of hidden vectors in attention layers"
-    doc_attn_layer = "The number of attention layers. Note that model compression of `se_atten` is only enabled when attn_layer==0 and stripped_type_embedding is True"
+    doc_attn_layer = "The number of attention layers."
     doc_attn_dotr = "Whether to do dot product with the normalized relative coordinates"
     doc_attn_mask = "Whether to do mask on the diagonal in the attention matrix"
diff --git a/doc/freeze/compress.md b/doc/freeze/compress.md
index 54b9baf7ca..87d4699d2b 100644
--- a/doc/freeze/compress.md
+++ b/doc/freeze/compress.md
@@ -148,6 +148,8 @@ The model compression interface requires the version of DeePMD-kit used in the o
 Descriptors with `se_e2_a`, `se_e3`, `se_e2_r` and `se_atten_v2` types are supported by the model compression feature. `Hybrid` mixed with the above descriptors is also supported.
 
+Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0.
+
 **Available activation functions for descriptor:**
 
 - tanh
diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md
index 79c7149a61..0ed73fe203 100644
--- a/doc/model/train-se-atten.md
+++ b/doc/model/train-se-atten.md
@@ -120,6 +120,8 @@ We highly recommend using the version 2.0 of the attention-based descriptor `"se
 Practical evidence demonstrates that `"se_atten_v2"` offers better and more stable performance compared to `"se_atten"`.
 
+Notice: Model compression for the `se_atten_v2` descriptor is exclusively designed for models with the training parameter {ref}`attn_layer ` set to 0.
+
 ### Fitting `"ener"`
 
 DPA-1 only supports `"ener"` fitting type, and you can refer [here](train-energy.md) for detailed information.
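
For context (not part of the patch): a minimal sketch of the descriptor section of a DeePMD-kit training input that satisfies the constraint documented above. Only `type` and `attn_layer` are the parameters referenced by this change; every other key and value below is an illustrative placeholder.

```python
# Illustrative sketch only, not part of this patch: the descriptor section of a
# DeePMD-kit training input written as a Python dict.  Values other than
# "type" and "attn_layer" are placeholders.
descriptor = {
    "type": "se_atten_v2",
    "sel": 120,          # placeholder: neighbor selection
    "rcut": 6.0,         # placeholder: cutoff radius
    "rcut_smth": 0.5,    # placeholder: where smoothing starts
    "attn_layer": 0,     # 0 attention layers, as required for model compression
}


def compressible(desc: dict) -> bool:
    """Mirror the documented rule: se_atten_v2 compresses only when attn_layer == 0."""
    return desc.get("type") != "se_atten_v2" or desc.get("attn_layer") == 0


assert compressible(descriptor)
```

A model trained with such a descriptor would then go through the usual freeze-and-compress workflow described in `doc/freeze/compress.md` (via `dp compress`).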