diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py
index 5f5bce6256..761862c330 100644
--- a/deepmd/entrypoints/test.py
+++ b/deepmd/entrypoints/test.py
@@ -890,12 +890,12 @@ def test_dipole(
         atomic=atomic,
         must=True,
         high_prec=False,
-        type_sel=dp.tselt,
+        type_sel=dp.get_sel_type(),
     )
     test_data = data.get_test()
     dipole, numb_test, atype = run_test(dp, test_data, numb_test, data)
-    sel_type = dp.tselt
+    sel_type = dp.get_sel_type()
     sel_natoms = 0
     for ii in sel_type:
         sel_natoms += sum(atype == ii)
diff --git a/deepmd/fit/dipole.py b/deepmd/fit/dipole.py
index 0d050b5da8..66eec26c49 100644
--- a/deepmd/fit/dipole.py
+++ b/deepmd/fit/dipole.py
@@ -67,7 +67,7 @@ def __init__(
     ) -> None:
         super().__init__(name_scope="DipoleFittingSeA")
         """Constructor."""
-        self.ntypes = descrpt.get_ntypes()  # 2
+        self.ntypes = descrpt.get_ntypes()
         self.dim_descrpt = descrpt.get_dim_out()
         self.n_neuron = neuron
         self.resnet_dt = resnet_dt
@@ -170,7 +170,6 @@ def _build_lower(
             [0, start_index, 0],
             [rot_mat.shape[0], start_index + natoms, rot_mat.shape[2]],
         )
-        # paddle.slice(rot_mat, [0, start_index, 0], [-1, natoms, -1])
         rot_mat_i = paddle.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
         layer = inputs_i
         for ii in range(0, len(self.n_neuron)):
@@ -178,64 +177,25 @@ def _build_lower(
             if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
                 layer += self.one_layers[type_i][ii](layer)
             else:
                 layer = self.one_layers[type_i][ii](layer)
-            # if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
-            #     layer += one_layer(
-            #         layer,
-            #         self.n_neuron[ii],
-            #         name="layer_" + str(ii) + suffix,
-            #         reuse=reuse,
-            #         seed=self.seed,
-            #         use_timestep=self.resnet_dt,
-            #         activation_fn=self.fitting_activation_fn,
-            #         precision=self.fitting_precision,
-            #         uniform_seed=self.uniform_seed,
-            #         initial_variables=self.fitting_net_variables,
-            #         mixed_prec=self.mixed_prec,
-            #     )
-            # else:
-            #     layer = one_layer(
-            #         layer,
-            #         self.n_neuron[ii],
-            #         name="layer_" + str(ii) + suffix,
-            #         reuse=reuse,
-            #         seed=self.seed,
-            #         activation_fn=self.fitting_activation_fn,
-            #         precision=self.fitting_precision,
-            #         uniform_seed=self.uniform_seed,
-            #         initial_variables=self.fitting_net_variables,
-            #         mixed_prec=self.mixed_prec,
-            #     )
+
             if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
-
+        # (nframes x natoms) x naxis
         final_layer = self.final_layers[type_i](
             layer,
         )
-        # # (nframes x natoms) x naxis
-        # final_layer = one_layer(
-        #     layer,
-        #     self.dim_rot_mat_1,
-        #     activation_fn=None,
-        #     name="final_layer" + suffix,
-        #     reuse=reuse,
-        #     seed=self.seed,
-        #     precision=self.fitting_precision,
-        #     uniform_seed=self.uniform_seed,
-        #     initial_variables=self.fitting_net_variables,
-        #     mixed_prec=self.mixed_prec,
-        #     final_layer=True,
-        # )
+
         if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
         # (nframes x natoms) x 1 * naxis
         final_layer = paddle.reshape(
             final_layer, [paddle.shape(inputs)[0] * natoms, 1, self.dim_rot_mat_1]
-        )  # natoms=64, self.dim_rot_mat_1=100
+        )
         # (nframes x natoms) x 1 x 3(coord)
         final_layer = paddle.matmul(final_layer, rot_mat_i)
         # nframes x natoms x 3
         final_layer = paddle.reshape(final_layer, [paddle.shape(inputs)[0], natoms, 3])
-        return final_layer  # [1, 64, 3]
+        return final_layer

     def forward(
         self,
@@ -282,7 +242,7 @@ def forward(
         if type_embedding is not None:
             nloc_mask = paddle.reshape(
-                paddle.tile(paddle.repeat(self.sel_mask, natoms[2:]), [nframes]),
+                paddle.tile(paddle.repeat_interleave(self.sel_mask, natoms[2:]), [nframes]),
                 [nframes, -1],
             )
             atype_nall = paddle.reshape(atype, [-1, natoms[1]])
@@ -293,7 +253,7 @@ def forward(
             self.nloc_masked = paddle.shape(
                 paddle.reshape(self.atype_nloc_masked, [nframes, -1])
             )[1]
-            atype_embed = paddle.nn.embedding_lookup(
+            atype_embed = nn.embedding_lookup(
                 type_embedding, self.atype_nloc_masked
             )
         else:
@@ -304,10 +264,10 @@ def forward(
         if atype_embed is None:
             count = 0
             outs_list = []
-            for type_i in range(self.ntypes):  # 2
+            for type_i in range(self.ntypes):
                 if type_i not in self.sel_type:
                     start_index += natoms[2 + type_i]
-                    continue  # sel_type是0,所以就循环了一次
+                    continue
                 final_layer = self._build_lower(
                     start_index,
                     natoms[2 + type_i],
@@ -321,8 +281,7 @@ def forward(
                 # concat the results
                 outs_list.append(final_layer)
                 count += 1
-
-            outs = paddle.concat(outs_list, axis=1)  # [1, 64, 3]
+            outs = paddle.concat(outs_list, axis=1)
         else:
             inputs = paddle.reshape(
                 paddle.reshape(inputs, [nframes, natoms[0], self.dim_descrpt])[
@@ -349,13 +308,10 @@ def forward(
             final_layer = self._build_lower(
                 0, self.nloc_masked, inputs, rot_mat, suffix=suffix, reuse=reuse
             )
-            # nframes x natoms x 3
             outs = paddle.reshape(final_layer, [nframes, self.nloc_masked, 3])
-        # paddle.summary.histogram("fitting_net_output", outs)

         return paddle.reshape(outs, [-1])
-        # return tf.reshape(outs, [tf.shape(inputs)[0] * natoms[0] * 3 // 3])

     def init_variables(
         self,
diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py
index 8a524a684d..38d0b6d166 100644
--- a/deepmd/infer/deep_eval.py
+++ b/deepmd/infer/deep_eval.py
@@ -81,7 +81,7 @@ def __init__(
         auto_batch_size: Union[bool, int, AutoBatchSize] = False,
     ):
         jdata = j_loader(
-            "input.json" if os.path.isfile("input.json") else "dipole_input.json"
+            "input.json" if os.path.exists("input.json") else "dipole_input.json"
         )
         remove_comment_in_json(jdata)
         model_param = j_must_have(jdata, "model")
@@ -150,7 +150,7 @@ def __init__(
                 fitting_param.pop("type", None)
                 fitting = dipole.DipoleFittingSeA(**fitting_param)
             else:
-                pass
+                raise NotImplementedError()
         else:
             self.fitting_dict = {}
             self.fitting_type_dict = {}
@@ -361,7 +361,6 @@ def __init__(
     @property
     @lru_cache(maxsize=None)
     def model_type(self) -> str:
-        return self.model.model_type
         """Get type of model.
diff --git a/deepmd/loss/tensor.py b/deepmd/loss/tensor.py
index ad0ba97b16..68dcdcb40c 100644
--- a/deepmd/loss/tensor.py
+++ b/deepmd/loss/tensor.py
@@ -174,7 +174,6 @@ def eval(self, model, batch_data, natoms):
         )

         l2_l, l2_more = self.compute_loss(
-            # 0.0, natoms, model_dict, batch_data
             0.0,
             model_inputs["natoms_vec"],
             model_pred,
diff --git a/deepmd/model/tensor.py b/deepmd/model/tensor.py
index 240bc99f8a..c04912c6fe 100644
--- a/deepmd/model/tensor.py
+++ b/deepmd/model/tensor.py
@@ -154,9 +154,7 @@ def forward(
         rot_mat = self.descrpt.get_rot_mat()
         rot_mat = paddle.clone(rot_mat, name="o_rot_mat" + suffix)
-        # rot_mat = paddle.fluid.layers.assign(rot_mat, name="o_rot_mat" + suffix)
-        # rot_mat = paddle.tensor.clone(rot_mat, name="o_rot_mat" + suffix)
-
+
         output = self.fitting(
             dout, rot_mat, natoms, input_dict, reuse=reuse, suffix=suffix
         )
diff --git a/deepmd/train/trainer.py b/deepmd/train/trainer.py
index a51d0c7182..3424aa95c8 100644
--- a/deepmd/train/trainer.py
+++ b/deepmd/train/trainer.py
@@ -142,7 +142,7 @@ def _init_param(self, jdata):
                 fitting_param.pop("type")
                 self.fitting = dipole.DipoleFittingSeA(**fitting_param)
             else:
-                pass
+                raise NotImplementedError
         else:
             self.fitting_dict = {}
             self.fitting_type_dict = {}
@@ -804,7 +804,6 @@ def train(self, train_data=None, valid_data=None, stop_batch: int = 10):
         cur_batch = self.global_step
         is_first_step = True
         self.cur_batch = cur_batch
-
         self.optimizer = paddle.optimizer.Adam(
             learning_rate=self.learning_rate, parameters=self.model.parameters()
         )
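Note on the selection-mask expansion in deepmd/fit/dipole.py: paddle.repeat_interleave repeats each element of a tensor by a per-element count (the semantics of np.repeat), which is what the per-type mask needs before it is tiled over frames; the replaced paddle.repeat call does not appear to be a public Paddle API. The sketch below only illustrates that semantics with made-up values; sel_mask, natoms_per_type, and nframes here are illustrative stand-ins, not the module's actual tensors.

import paddle

# Hypothetical setup: 2 atom types, only type 0 selected for the dipole fit,
# 3 atoms of type 0 and 2 atoms of type 1.
sel_mask = paddle.to_tensor([1, 0], dtype="int32")
natoms_per_type = paddle.to_tensor([3, 2], dtype="int32")

# Expand per-type flags to per-atom flags: each flag is repeated by the
# number of atoms of that type, giving [1, 1, 1, 0, 0].
per_atom_mask = paddle.repeat_interleave(sel_mask, natoms_per_type)

# Tiling over the frames and reshaping mirrors the pattern used in forward().
nframes = 4
per_frame_mask = paddle.reshape(paddle.tile(per_atom_mask, [nframes]), [nframes, -1])
print(per_frame_mask.shape)  # [4, 5]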