diff --git a/data/raw/copy_raw.py b/data/raw/copy_raw.py
index 642865db86..69ccdf5c63 100755
--- a/data/raw/copy_raw.py
+++ b/data/raw/copy_raw.py
@@ -85,7 +85,7 @@ def _main():
     )
     args = parser.parse_args()
 
-    print("# copy the system by %s copies" % args.ncopies)
+    print("# copy the system by %s copies" % args.ncopies)  # noqa: T201
     assert np.all(
         np.array(args.ncopies, dtype=int) >= np.array([1, 1, 1], dtype=int)
     ), "number of copies should be larger than or equal to 1"
diff --git a/data/raw/shuffle_raw.py b/data/raw/shuffle_raw.py
index 51bb7466c9..b4fc1457e5 100755
--- a/data/raw/shuffle_raw.py
+++ b/data/raw/shuffle_raw.py
@@ -37,7 +37,7 @@ def _main():
     outpath = args.OUTPUT
 
     if not os.path.isdir(inpath):
-        print("# no input dir " + inpath + ", exit")
+        print("# no input dir " + inpath + ", exit")  # noqa: T201
         return
 
     if not os.path.isdir(outpath):
@@ -47,16 +47,16 @@
         raws = detect_raw(inpath)
 
     if len(raws) == 0:
-        print("# no file to shuffle, exit")
+        print("# no file to shuffle, exit")  # noqa: T201
         return
 
     assert "box.raw" in raws
     tmp = np.loadtxt(os.path.join(inpath, "box.raw"))
     tmp = np.reshape(tmp, [-1, 9])
     nframe = tmp.shape[0]
-    print(nframe)
+    print(nframe)  # noqa: T201
 
-    print(
+    print(  # noqa: T201
         "# will shuffle raw files "
         + str(raws)
         + " in dir "
diff --git a/deepmd/entrypoints/doc.py b/deepmd/entrypoints/doc.py
index 087eb10f73..e55e84f9d3 100644
--- a/deepmd/entrypoints/doc.py
+++ b/deepmd/entrypoints/doc.py
@@ -17,4 +17,4 @@ def doc_train_input(*, out_type: str = "rst", **kwargs):
         doc_str = gen_json()
     else:
         raise RuntimeError("Unsupported out type %s" % out_type)
-    print(doc_str)
+    print(doc_str)  # noqa: T201
diff --git a/doc/sphinx_contrib_exhale_multiproject.py b/doc/sphinx_contrib_exhale_multiproject.py
index e05cf88ba2..e26cc158a4 100644
--- a/doc/sphinx_contrib_exhale_multiproject.py
+++ b/doc/sphinx_contrib_exhale_multiproject.py
@@ -103,11 +103,11 @@ def exhale_environment_ready(app):
         app.config.exhale_args["containmentFolder"] = os.path.realpath(
             app.config.exhale_args["containmentFolder"]
         )
-        print("=" * 75)
-        print(project)
-        print("-" * 50)
-        pprint(app.config.exhale_args)
-        print("=" * 75)
+        print("=" * 75)  # noqa: T201
+        print(project)  # noqa: T201
+        print("-" * 50)  # noqa: T201
+        pprint(app.config.exhale_args)  # noqa: T203
+        print("=" * 75)  # noqa: T201
 
         # First, setup the extension and verify all of the configurations.
         exhale.configs.apply_sphinx_configurations(app)
diff --git a/pyproject.toml b/pyproject.toml
index 0d1471c2c3..84cc7237bc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -238,6 +238,7 @@ select = [
     "NPY", # numpy
     "TID251", # banned-api
     "TID253", # banned-module-level-imports
+    "T20", # ban print
 ]
 
 ignore = [
@@ -283,6 +284,7 @@ banned-module-level-imports = [
 "source/tests/pt/**" = ["TID253"]
 "source/ipi/tests/**" = ["TID253"]
 "source/lmp/tests/**" = ["TID253"]
+"**/*.ipynb" = ["T20"] # printing in a nb file is expected
 
 [tool.pytest.ini_options]
 markers = "run"
diff --git a/source/tests/pt/model/test_unused_params.py b/source/tests/pt/model/test_unused_params.py
index c20a5f1dc5..36080c2bbd 100644
--- a/source/tests/pt/model/test_unused_params.py
+++ b/source/tests/pt/model/test_unused_params.py
@@ -87,8 +87,6 @@ def get_contributing_params(y, top_level=True):
         contributing_parameters = set(get_contributing_params(ret0["energy"]))
         all_parameters = set(self.model.parameters())
         non_contributing = all_parameters - contributing_parameters
-        for ii in non_contributing:
-            print(ii.shape)
         self.assertEqual(len(non_contributing), 0)
 
 
diff --git a/source/tests/pt/test_dp_test.py b/source/tests/pt/test_dp_test.py
index 08bd2ce623..095994f8ec 100644
--- a/source/tests/pt/test_dp_test.py
+++ b/source/tests/pt/test_dp_test.py
@@ -49,8 +49,7 @@ def test_dp_test(self):
         try:
             res = tester.run()
         except StopIteration:
-            print("Unexpected stop iteration.(test step < total batch)")
-            raise StopIteration
+            raise StopIteration("Unexpected stop iteration.(test step < total batch)")
         for k, v in res.items():
             if k == "rmse" or "mae" in k or k not in more_loss:
                 continue
diff --git a/source/tests/tf/common.py b/source/tests/tf/common.py
index 0bcb29b4b5..d4f3cc8392 100644
--- a/source/tests/tf/common.py
+++ b/source/tests/tf/common.py
@@ -4,6 +4,7 @@
 import os
 import pathlib
 import shutil
+import warnings
 
 import dpdata
 import numpy as np
@@ -969,7 +970,7 @@ def __init__(self, systems, set_prefix, batch_size, test_size, rcut, run_opt=Non
             )
             chk_ret = self.data_systems[ii].check_test_size(test_size)
             if chk_ret is not None:
-                print(
+                warnings.warn(
                     "WARNNING: system %s required test size %d is larger than the size %d of the dataset %s"
                     % (self.system_dirs[ii], test_size, chk_ret[1], chk_ret[0])
                 )
diff --git a/source/tests/tf/test_adjust_sel.py b/source/tests/tf/test_adjust_sel.py
index c86bad45b7..435d17d959 100644
--- a/source/tests/tf/test_adjust_sel.py
+++ b/source/tests/tf/test_adjust_sel.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     # we use the setting for model compression
     data_file = str(tests_path / os.path.join("model_compression", "data"))
diff --git a/source/tests/tf/test_finetune_se_atten.py b/source/tests/tf/test_finetune_se_atten.py
index 35eb994a46..40fc5b68a3 100644
--- a/source/tests/tf/test_finetune_se_atten.py
+++ b/source/tests/tf/test_finetune_se_atten.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -46,17 +45,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models(setup_model, i):
     data_file = str(tests_path / os.path.join("finetune", "data"))
     data_file_mixed_type = str(tests_path / os.path.join("finetune", "data_mixed_type"))
diff --git a/source/tests/tf/test_mixed_prec_training.py b/source/tests/tf/test_mixed_prec_training.py
index 63504134af..4a4021771d 100644
--- a/source/tests/tf/test_mixed_prec_training.py
+++ b/source/tests/tf/test_mixed_prec_training.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -28,17 +27,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 class TestMixedPrecTraining(unittest.TestCase):
     def setUp(self):
         data_file = str(tests_path / os.path.join("model_compression", "data"))
diff --git a/source/tests/tf/test_model_compression_se_a.py b/source/tests/tf/test_model_compression_se_a.py
index 37d1857661..4e49dd44e0 100644
--- a/source/tests/tf/test_model_compression_se_a.py
+++ b/source/tests/tf/test_model_compression_se_a.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     data_file = str(tests_path / os.path.join("model_compression", "data"))
     frozen_model = str(tests_path / "dp-original.pb")
diff --git a/source/tests/tf/test_model_compression_se_a_ebd.py b/source/tests/tf/test_model_compression_se_a_ebd.py
index 1ab0cfe5cc..debae1f0ba 100644
--- a/source/tests/tf/test_model_compression_se_a_ebd.py
+++ b/source/tests/tf/test_model_compression_se_a_ebd.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     data_file = str(tests_path / os.path.join("model_compression", "data"))
     frozen_model = str(tests_path / "dp-original-se-e2-a-v2.pb")
diff --git a/source/tests/tf/test_model_compression_se_a_ebd_type_one_side.py b/source/tests/tf/test_model_compression_se_a_ebd_type_one_side.py
index 5ae8ef4990..a24bf48398 100644
--- a/source/tests/tf/test_model_compression_se_a_ebd_type_one_side.py
+++ b/source/tests/tf/test_model_compression_se_a_ebd_type_one_side.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     data_file = str(tests_path / os.path.join("model_compression", "data"))
     frozen_model = str(tests_path / "dp-original-se-e2-a-v2-one-side.pb")
diff --git a/source/tests/tf/test_model_compression_se_a_type_one_side_exclude_types.py b/source/tests/tf/test_model_compression_se_a_type_one_side_exclude_types.py
index 3726fc2bda..a9de974e4d 100644
--- a/source/tests/tf/test_model_compression_se_a_type_one_side_exclude_types.py
+++ b/source/tests/tf/test_model_compression_se_a_type_one_side_exclude_types.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     data_file = str(tests_path / os.path.join("model_compression", "data"))
     frozen_model = str(tests_path / "dp-original-type-one-side-exclude-types.pb")
diff --git a/source/tests/tf/test_model_compression_se_atten.py b/source/tests/tf/test_model_compression_se_atten.py
index dbc54dd51a..aa1f0afa38 100644
--- a/source/tests/tf/test_model_compression_se_atten.py
+++ b/source/tests/tf/test_model_compression_se_atten.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -29,17 +28,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 # 4 tests:
 # - type embedding FP64, se_atten FP64
 # - type embedding FP64, se_atten FP32
diff --git a/source/tests/tf/test_model_compression_se_r.py b/source/tests/tf/test_model_compression_se_r.py
index 4a5d9ad9f6..26665e5354 100644
--- a/source/tests/tf/test_model_compression_se_r.py
+++ b/source/tests/tf/test_model_compression_se_r.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     data_file = str(tests_path / os.path.join("model_compression", "data"))
     frozen_model = str(tests_path / "dp-original-se-r.pb")
diff --git a/source/tests/tf/test_model_compression_se_t.py b/source/tests/tf/test_model_compression_se_t.py
index 0cf1135f8a..ec68176cdb 100644
--- a/source/tests/tf/test_model_compression_se_t.py
+++ b/source/tests/tf/test_model_compression_se_t.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import json
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -33,17 +32,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 def _init_models():
     data_file = str(tests_path / os.path.join("model_compression", "data"))
     frozen_model = str(tests_path / "dp-original-se-t.pb")
diff --git a/source/tests/tf/test_parallel_training.py b/source/tests/tf/test_parallel_training.py
index 1f93c809a2..d190764695 100644
--- a/source/tests/tf/test_parallel_training.py
+++ b/source/tests/tf/test_parallel_training.py
@@ -44,7 +44,6 @@ def test_two_workers(self):
             if hasattr(line, "decode"):
                 line = line.decode("utf-8")
             line = line.rstrip()
-            print(line)
         popen.wait()
         self.assertEqual(0, popen.returncode, "Parallel training failed!")
 
diff --git a/source/tests/tf/test_transfer.py b/source/tests/tf/test_transfer.py
index e5b7f0a906..48e9f78e0d 100644
--- a/source/tests/tf/test_transfer.py
+++ b/source/tests/tf/test_transfer.py
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import os
-import subprocess as sp
 import unittest
 
 import numpy as np
@@ -32,17 +31,6 @@ def _file_delete(file):
         os.remove(file)
 
 
-def _subprocess_run(command):
-    popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
-    for line in iter(popen.stdout.readline, b""):
-        if hasattr(line, "decode"):
-            line = line.decode("utf-8")
-        line = line.rstrip()
-        print(line)
-    popen.wait()
-    return popen.returncode
-
-
 class TestTransform(unittest.TestCase):
     @classmethod
     def setUpClass(self):