diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml old mode 100644 new mode 100755 index 86c08d9..652dbdb --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,6 +37,3 @@ jobs: - name: Test with unit test (UT) pytest run: | pytest test/unit_test - - name: Test with system test (ST) pytest - run: | - pytest test/developer_test diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 539f823..8fe617b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,14 +5,10 @@ repos: # list of supported hooks: https://pre-commit.com/hooks.html - id: check-yaml - id: debug-statements - - id: end-of-file-fixer - - id: mixed-line-ending - args: ["--fix=lf"] - - id: trailing-whitespace - repo: https://github.com/pylint-dev/pylint rev: v2.14.5 hooks: - id: pylint args: [ "-rn", "-sn", "--rcfile=pylintrc", "--fail-on=I" ] - exclude: tests(/\w*)*/functional/|tests/input|tests(/\w*)*data/|doc/ + exclude: tests(/\w*)*/functional/|tests/input|tests(/\w*)*data/|doc/|test|pylintrc diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 index bf7cbc4..2f0ffcd --- a/README.md +++ b/README.md @@ -45,12 +45,12 @@ pip uninstall mindpet | 微调算法 | 算法论文 | 使用说明 | |----------------| ----------------------------------------------------------- |-----------------------------------------------------------------| -| LoRA | LoRA: Low-Rank Adaptation of Large Language Models | [TK_DeltaAlgorithm_README](doc/TK_DeltaAlgorithm_README.md) 第一章 | -| PrefixTuning | Prefix-Tuning: Optimizing Continuous Prompts for Generation | [TK_DeltaAlgorithm_README](doc/TK_DeltaAlgorithm_README.md) 第二章 | -| Adapter | Parameter-Efficient Transfer Learning for NLP | [TK_DeltaAlgorithm_README](doc/TK_DeltaAlgorithm_README.md) 第三章 | -| LowRankAdapter | Compacter: Efficient low-rank hypercom plex adapter layers | 
[TK_DeltaAlgorithm_README](doc/TK_DeltaAlgorithm_README.md) 第四章 | -| BitFit | BitFit: Simple Parameter-efficient Fine-tuning for Transformer-based Masked Language-models | [TK_DeltaAlgorithm_README](doc/TK_DeltaAlgorithm_README.md) 第五章 | -| R_Drop | R-Drop: Regularized Dropout for Neural Networks | [TK_DeltaAlgorithm_README](doc/TK_DeltaAlgorithm_README.md) 第六章 | +| LoRA | LoRA: Low-Rank Adaptation of Large Language Models | [MindPet_DeltaAlgorithm_README](doc/MindPet_DeltaAlgorithm_README.md) 第一章 | +| PrefixTuning | Prefix-Tuning: Optimizing Continuous Prompts for Generation | [MindPet_DeltaAlgorithm_README](doc/MindPet_DeltaAlgorithm_README.md) 第二章 | +| Adapter | Parameter-Efficient Transfer Learning for NLP | [MindPet_DeltaAlgorithm_README](doc/MindPet_DeltaAlgorithm_README.md) 第三章 | +| LowRankAdapter | Compacter: Efficient low-rank hypercom plex adapter layers | [MindPet_DeltaAlgorithm_README](doc/MindPet_DeltaAlgorithm_README.md) 第四章 | +| BitFit | BitFit: Simple Parameter-efficient Fine-tuning for Transformer-based Masked Language-models | [MindPet_DeltaAlgorithm_README](doc/MindPet_DeltaAlgorithm_README.md) 第五章 | +| R_Drop | R-Drop: Regularized Dropout for Neural Networks | [MindPet_DeltaAlgorithm_README](doc/MindPet_DeltaAlgorithm_README.md) 第六章 | @@ -60,7 +60,7 @@ pip uninstall mindpet MindPet支持用户根据 微调算法 或 模块名 冻结网络中部分模块,提供调用接口和配置文件两种实现方式。 -使用说明参考[TK_GraphOperation_README](doc/TK_GraphOperation_README.md) 第一章。 +使用说明参考[MindPet_GraphOperation_README](doc/MindPet_GraphOperation_README.md) 第一章。 @@ -68,4 +68,4 @@ MindPet支持用户根据 微调算法 或 模块名 冻结网络中部分模块 MindPet支持用户单独保存训练中可更新的参数为ckpt文件,从而节省存储所用的物理资源。 -使用说明参考[TK_GraphOperation_README](doc/TK_GraphOperation_README.md) 第二章。 +使用说明参考[MindPet_GraphOperation_README](doc/MindPet_GraphOperation_README.md) 第二章。 diff --git a/doc/TK_DeltaAlgorithm_README.md b/doc/MindPet_DeltaAlgorithm_README.md old mode 100644 new mode 100755 similarity index 95% rename from doc/TK_DeltaAlgorithm_README.md rename to 
doc/MindPet_DeltaAlgorithm_README.md index d29f769..4d707c9 --- a/doc/TK_DeltaAlgorithm_README.md +++ b/doc/MindPet_DeltaAlgorithm_README.md @@ -19,7 +19,7 @@ LoRA算法是一种针对超大语言模型的轻量化微调算法,通过使 #### LoRADense ```python -class tk.delta.lora.LoRADense(in_channels, +class mindpet.delta.lora.LoRADense(in_channels, out_channels, lora_rank, lora_alpha, @@ -137,7 +137,7 @@ shard(strategy_org_dense_matmul=None, 2)在模型的Attention结构中,从工具包引入`LoRADense`类,并将原query、value层的`nn.Dense`替换为`LoRADense`,无需修改原始参数,需新增`lora_rank`与`lora_alpha`两个必选参数,其余参数可参考API接口自行指定。如果进行分布式训练,可调用`shard`方法指定分布式策略。 ```python -from tk.delta import LoRADense +from mindpet.delta import LoRADense # original Dense Layer # dense1 = nn.Dense(in_channels=1*28*28, out_channels=512,...) @@ -155,21 +155,21 @@ dense1.shard(strategy_org_dense_matmul=((2, 1), (4, 1)), strategy_activation=((2, 4), (2, 4)) ``` -3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`LoRA`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`LoRA`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```Python -from tk.graph import freeze_delta +from mindpet.graph import freeze_delta # freeze all cells except LoRA and head freeze_delta(model=network, mode='lora', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) +然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # 
ckpt_callback = ModelCheckpoint(...) @@ -253,7 +253,7 @@ Prefix算法原理图: 对于每个下游任务,添加一份和当前任务相 ### 2.2 API接口 ``` python - class tk.delta.prefix_layer.PrefixLayer(prefix_token_num, + class mindpet.delta.prefix_layer.PrefixLayer(prefix_token_num, batch_size, num_heads, hidden_dim, @@ -306,7 +306,7 @@ import mindspore.nn as nn from mindspore.common.tensor import Tensor # 第一步 导入PrefixLayer -from tk.delta.prefix_layer import PrefixLayer +from mindpet.delta.prefix_layer import PrefixLayer #模型的Attention层 class MaskSelfAttention(nn.Cell): @@ -343,21 +343,21 @@ class MaskSelfAttention(nn.Cell): attention_mask = mindspore.ops.concat((attention_mask, self.help), -1) ``` -3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`Prefix`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`Prefix`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```Python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # freeze all cell except Prefix and head freeze_delta(model=network, mode='prefix', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) +然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # ckpt_callback = ModelCheckpoint(...) 
@@ -438,7 +438,7 @@ Adapter结构本质是一个bottleneck层,包含降维全连接层(adapter_d #### AdapterDense ```python -class tk.delta.adapter.AdapterDense(in_channels, +class mindpet.delta.adapter.AdapterDense(in_channels, out_channels, weight_init='normal', bias_init='zeros', @@ -550,7 +550,7 @@ shard(strategy_matmul_org=None, #### AdapterLayer ```python -class tk.delta.adapter.AdapterLayer(hidden_size: int, +class mindpet.delta.adapter.AdapterLayer(hidden_size: int, bottleneck_size: int = 64, non_linearity: str = "gelu", param_init_type: mindspore.dtype.float32, @@ -649,7 +649,7 @@ shard(strategy_matmul_down_sampler=None, 2)在模型的Attention结构中,从工具包中引入`AdapterDense`类,并参照算法原理将原有`nn.Dense`类替换为`AdapterDense`,无需修改原始参数,需新增`bottleneck_size`必选参数,其余参数可参考API接口自行指定。如果进行分布式训练,则调用`shard`方法指定分布式策略。 ```python -from tk.delta import AdapterDense +from mindpet.delta import AdapterDense # original Dense Layer # dense1 = nn.Dense(in_channels=1*28*28, out_channels=512,...) @@ -669,21 +669,21 @@ dense1.shard(strategy_matmul_org=((2, 4), (1, 4)), strategy_residential_add=((2, 1), (2, 1))) ``` -3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`Adapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`Adapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```Python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # freeze all cell except Adapter and head freeze_delta(model=network, mode='adapter', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) 
+然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # ckpt_callback = ModelCheckpoint(...) @@ -704,7 +704,7 @@ ckpt_callback = TrainableParamsCheckPoint(...) ```python import mindspore.nn as nn -from tk.delta import AdapterLayer +from mindpet.delta import AdapterLayer # original Dense Layer dense = nn.Dense(in_channels=1*28*28, out_channels=512,...) @@ -727,21 +727,21 @@ dense_output = dense(input_tensor) adapter_layer_output = adapter_layer(dense_output) ``` -3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`Adapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`Adapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```Python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # freeze all cell except Adapter and head freeze_delta(model=network, mode='adapter', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) +然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # ckpt_callback = ModelCheckpoint(...) 
@@ -827,7 +827,7 @@ Low-Rank Adapter在论文中有所描述,其原理是将adapter层的每个矩 #### LowRankAdapterDense ```python -class tk.delta.adapter.LowRankAdapterDense(in_channels: int, +class mindpet.delta.adapter.LowRankAdapterDense(in_channels: int, out_channels: int, weight_init='normal', bias_init='zeros', @@ -959,7 +959,7 @@ shard(strategy_matmul_org=None, #### LowRankAdapterLayer ```python -class tk.delta.adapter.LowRankAdapterLayer(hidden_size: int, +class mindpet.delta.adapter.LowRankAdapterLayer(hidden_size: int, reduction_factor: int, low_rank_size: int = 1, low_rank_w_init="xavier_uniform", @@ -1079,7 +1079,7 @@ shard(strategy_matmul_down_sampler_weight=None, ```python import mindspore.nn as nn -from tk.delta import LowRankAdapterDense +from mindpet.delta import LowRankAdapterDense # original Dense Layer # dense1 = nn.Dense(in_channels=1*28*28, out_channels=512,...) @@ -1101,21 +1101,21 @@ dense1.shard(strategy_matmul_org=((2, 4), (1, 4)), strategy_residual_add=((2, 1), (2, 1))) ``` -3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`LowRankAdapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`LowRankAdapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```Python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # freeze all cell except LowRankAdapter and head freeze_delta(model=network, mode='low_rank_adapter', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) +然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 
由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # ckpt_callback = ModelCheckpoint(...) @@ -1136,7 +1136,7 @@ ckpt_callback = TrainableParamsCheckPoint(...) ```python import mindspore.nn as nn -from tk.delta import LowRankAdapterLayer +from mindpet.delta import LowRankAdapterLayer # original Dense Layer dense = nn.Dense(in_channels=1*28*28, out_channels=512,...) @@ -1161,21 +1161,21 @@ dense_output = dense(input_tensor) adapter_layer_output = low_rank_adapter_layer(dense_output) ``` -3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`LowRankAdapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +3)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除`LowRankAdapter`矩阵外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```Python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # freeze all cell except LowRankAdapter and head freeze_delta(model=network, mode='low_rank_adapter', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) +然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # ckpt_callback = ModelCheckpoint(...) 
@@ -1256,7 +1256,7 @@ BitFit是一种稀疏的微调方法。具体做法是在下游任务微调时 freeze_delta(model, mode, include, exclude) ``` -该函数提供了基本的BitFit算法冻结模式:当参数`mode='bitfit'`时,表明除了带有bias的参数,其余参数全部冻结。具体使用方法可参考[《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)。 +该函数提供了基本的BitFit算法冻结模式:当参数`mode='bitfit'`时,表明除了带有bias的参数,其余参数全部冻结。具体使用方法可参考[《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)。 @@ -1264,21 +1264,21 @@ freeze_delta(model, mode, include, exclude) 1)安装mindpet工具包。([安装方法参考《README.md》第二章](../README.md)) -2)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除bias参数外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《TK_GraphOperation_README.md》第一章](TK_GraphOperation_README.md)) +2)在训练脚本中,从工具包中引入`freeze_delta`方法,定义优化器之前调用`freeze_delta`冻结除bias参数外其它原模型权重。注意,为了适配下游任务引入的额外模型结构无需冻结,可以用`exclude`参数指定无需冻结的结构名称。([冻结方法参考《MindPet_GraphOperation_README.md》第一章](MindPet_GraphOperation_README.md)) ```python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # freeze all cell except bias and head freeze_delta(model=network, mode='bitfit', exclude=['*head*']) ``` -然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md)) +然后从工具包中引入`TrainableParamsCheckPoint`类,将保存ckpt的类改为`TrainableParamsCheckPoint`,仅保存需要更新的参数,可节约存储空间。([详细方法参考《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md)) 由于微调后只保存了部分参数,推理时具体如何加载ckpt请参考[附录A](###A 分布式微调后模型评估方法)。 ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint # original callback # ckpt_callback = ModelCheckpoint(...) 
@@ -1353,7 +1353,7 @@ R_Drop算法是一种用于提升精度的微调算法,使用“进行两次dr #### RDropLoss ```python -class tk.delta.r_drop.RDropLoss +class mindpet.delta.r_drop.RDropLoss ``` RDropLoss类使用了包含r_drop算法的loss计算方式,通过logits和labels计算ce_loss和kl_loss得到最终的loss值。 @@ -1362,7 +1362,7 @@ RDropLoss类使用了包含r_drop算法的loss计算方式,通过logits和labe #### RDropLoss.construct ```python -class tk.delta.r_drop.RDropLoss.construct(logits, label_ids, alpha) +class mindpet.delta.r_drop.RDropLoss.construct(logits, label_ids, alpha) ``` **参数** @@ -1389,7 +1389,7 @@ class tk.delta.r_drop.RDropLoss.construct(logits, label_ids, alpha) ```python import mindspore.nn as nn -from tk.delta import RDropLoss, rdrop_repeat +from mindpet.delta import RDropLoss, rdrop_repeat class BertClsModel(BaseModel): def __init__(self, ...): @@ -1468,7 +1468,7 @@ class BertClsModel(BaseModel): ### A 微调后模型评估方法 #### 场景一:使用TrainableParamsCheckPoint接口 -参考[《TK_GraphOperation_README.md》第二章](TK_GraphOperation_README.md) +参考[《MindPet_GraphOperation_README.md》第二章](MindPet_GraphOperation_README.md) #### 场景二:未使用TrainableParamsCheckPoint接口 当MindSpore版本低于1.9及以下时,在分布式微调之后,需要按照以下方案进行推理,示例代码参见如下,其中checkpoint文件列表、分布式策略文件路径、模型实例需要用户根据实际情况进行替换。 diff --git a/doc/TK_GraphOperation_README.md b/doc/MindPet_GraphOperation_README.md old mode 100644 new mode 100755 similarity index 97% rename from doc/TK_GraphOperation_README.md rename to doc/MindPet_GraphOperation_README.md index e183b74..119f517 --- a/doc/TK_GraphOperation_README.md +++ b/doc/MindPet_GraphOperation_README.md @@ -38,7 +38,7 @@ freeze_modules(model, include, exclude) **样例:** ```python -from tk.graph.freeze_utils import freeze_modules +from mindpet.graph.freeze_utils import freeze_modules # 初始化网络结构 model = Network() @@ -86,7 +86,7 @@ freeze_delta(model, mode, include, exclude) **样例:** ```python -from tk.graph.freeze_utils import freeze_delta +from mindpet.graph.freeze_utils import freeze_delta # 初始化网络结构 model = Network() @@ -141,7 +141,7 @@ freeze_from_config(model, config_path) **样例:** ```python -from 
tk.graph.freeze_utils import freeze_from_config +from mindpet.graph.freeze_utils import freeze_from_config # 初始化网络结构 model = Network() @@ -187,7 +187,7 @@ TrainableParamsCheckPoint(directory, prefix, config) - **在模型微调时**,从大模型微调工具包中引入`TrainableParamsCheckPoint`类,用法与MindSpore的`ModelCheckpoint`一致,实例化此`callback`后,加入训练时的`callback list`即可,例如: ```python -from tk.graph import TrainableParamsCheckPoint +from mindpet.graph import TrainableParamsCheckPoint from mindspore import CheckpointConfig ckpt_config = CheckpointConfig() diff --git a/doc/image/architecture_of_adapter_module.png b/doc/image/architecture_of_adapter_module.png old mode 100644 new mode 100755 diff --git a/doc/image/architecture_of_low_rank_adapter_module.png b/doc/image/architecture_of_low_rank_adapter_module.png old mode 100644 new mode 100755 diff --git a/doc/image/lora.PNG b/doc/image/lora.PNG old mode 100644 new mode 100755 diff --git a/doc/image/prefix.png b/doc/image/prefix.png old mode 100644 new mode 100755 diff --git a/mindpet/__init__.py b/mindpet/__init__.py old mode 100644 new mode 100755 index 3eb0186..9cb1ab2 --- a/mindpet/__init__.py +++ b/mindpet/__init__.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
+"""Mindpet sdk APIs.""" -import mindpet.tk_sdk as tk_sdk +from mindpet import mindpet_sdk -__all__ = ["tk_sdk"] +__all__ = ["mindpet_sdk"] diff --git a/mindpet/delta/__init__.py b/mindpet/delta/__init__.py old mode 100644 new mode 100755 diff --git a/mindpet/delta/adapter.py b/mindpet/delta/adapter.py old mode 100644 new mode 100755 index 2c5e7e5..08d792b --- a/mindpet/delta/adapter.py +++ b/mindpet/delta/adapter.py @@ -53,17 +53,17 @@ def __init__( self.non_linearity_name = non_linearity adapter_dict = OrderedDict() - adapter_dict["tk_delta_adapter_down_sampler"] = _Linear(hidden_size, + adapter_dict["mindpet_delta_adapter_down_sampler"] = _Linear(hidden_size, bottleneck_size, compute_dtype=compute_dtype, param_init_type=param_init_type) - adapter_dict["tk_delta_adapter_non_linear"] = get_activation(non_linearity) - adapter_dict["tk_delta_adapter_up_sampler"] = _Linear(bottleneck_size, + adapter_dict["mindpet_delta_adapter_non_linear"] = get_activation(non_linearity) + adapter_dict["mindpet_delta_adapter_up_sampler"] = _Linear(bottleneck_size, hidden_size, compute_dtype=compute_dtype, param_init_type=param_init_type) - self.tk_delta_adapter_block = nn.SequentialCell(adapter_dict) + self.mindpet_delta_adapter_block = nn.SequentialCell(adapter_dict) self.residual_add = P.Add() self.cast = P.Cast() self.shape = P.Shape() @@ -79,7 +79,7 @@ def construct(self, input_tensor): input_tensor = self.reshape(input_tensor, (-1, input_tensor_shape[-1])) # calculate adapter_out - adapter_out = self.tk_delta_adapter_block(input_tensor) + adapter_out = self.mindpet_delta_adapter_block(input_tensor) # residual connection, add input and adapter_out output = self.residual_add(input_tensor, adapter_out) @@ -99,27 +99,29 @@ def shard(self, strategy_residual_add=None): """Shard Method""" try: - self.tk_delta_adapter_block.tk_delta_adapter_down_sampler.shard( + self.mindpet_delta_adapter_block.mindpet_delta_adapter_down_sampler.shard( strategy_matmul=strategy_matmul_down_sampler, 
strategy_bias=strategy_bias_down_sampler) if self.non_linearity_name.lower() == "leakyrelu": - self.tk_delta_adapter_block.tk_delta_adapter_non_linear.select_op.shard( + self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.select_op.shard( (strategy_non_linearity[0], strategy_non_linearity[0])) elif self.non_linearity_name.lower() == "logsigmoid": - self.tk_delta_adapter_block.tk_delta_adapter_non_linear.mul.shard((strategy_non_linearity[0], ())) - self.tk_delta_adapter_block.tk_delta_adapter_non_linear.exp.shard(strategy_non_linearity) - self.tk_delta_adapter_block.tk_delta_adapter_non_linear.add.shard((strategy_non_linearity[0], ())) - self.tk_delta_adapter_block.tk_delta_adapter_non_linear.rec.shard(strategy_non_linearity) - self.tk_delta_adapter_block.tk_delta_adapter_non_linear.log.shard(strategy_non_linearity) + self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.mul.shard(( + strategy_non_linearity[0], ())) + self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.exp.shard(strategy_non_linearity) + self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.add.shard(( + strategy_non_linearity[0], ())) + self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.rec.shard(strategy_non_linearity) + self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.log.shard(strategy_non_linearity) elif self.non_linearity_name.lower() == "logsoftmax": raise ValueError("The 'LogSoftmax' function is not supported in semi auto parallel " "or auto parallel mode.") else: - getattr(self.tk_delta_adapter_block.tk_delta_adapter_non_linear, + getattr(self.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear, self.non_linearity_name).shard(strategy_non_linearity) - self.tk_delta_adapter_block.tk_delta_adapter_up_sampler.shard(strategy_matmul=strategy_matmul_up_sampler, - strategy_bias=strategy_bias_up_sampler) + self.mindpet_delta_adapter_block.mindpet_delta_adapter_up_sampler.shard( + 
strategy_matmul=strategy_matmul_up_sampler,strategy_bias=strategy_bias_up_sampler) self.residual_add.shard(strategy_residual_add) @@ -142,7 +144,7 @@ class AdapterDense(nn.Dense): 当使用str时,值引用自类initializer;更多细节请参考Initializer的值。 当使用Tensor时,数据类型与输入Tensor相同。 默认值:"normal"。 - bias_init (Union[Tensor, str, Initializer, numbers.Number]): + bias_init (Union[Tensor, str, Initializer, numbers.Number]): 线性层偏置参数的初始化方法。 它的类型可以是Tensor,str,Initializer或numbers.Number。 当使用str时,值引用自类initializer;更多细节请参考Initializer的值。 @@ -194,7 +196,7 @@ def __init__(self, has_bias=has_bias, activation=activation) - self.tk_delta_adapter = AdapterLayer(hidden_size=out_channels, + self.mindpet_delta_adapter = AdapterLayer(hidden_size=out_channels, bottleneck_size=bottleneck_size, non_linearity=non_linearity, param_init_type=param_init_type, @@ -226,7 +228,7 @@ def construct(self, input_tensor): input_tensor = self.activation(input_tensor) # calculate adapter_out - input_tensor = self.tk_delta_adapter(input_tensor) + input_tensor = self.mindpet_delta_adapter(input_tensor) # recover the previous outshape and dtype out_shape = x_shape[:-1] + (-1,) @@ -267,7 +269,7 @@ def shard(self, getattr(self.activation, self.act_name).shard(strategy_activation_org) # set adapter strategy - self.tk_delta_adapter.shard(strategy_matmul_down_sampler=strategy_matmul_down_sampler, + self.mindpet_delta_adapter.shard(strategy_matmul_down_sampler=strategy_matmul_down_sampler, strategy_bias_down_sampler=strategy_bias_down_sampler, strategy_non_linearity=strategy_non_linearity, strategy_matmul_up_sampler=strategy_matmul_up_sampler, diff --git a/mindpet/delta/delta_constants.py b/mindpet/delta/delta_constants.py old mode 100644 new mode 100755 diff --git a/mindpet/delta/lora.py b/mindpet/delta/lora.py old mode 100644 new mode 100755 index de7bb5a..2dd6ddc --- a/mindpet/delta/lora.py +++ b/mindpet/delta/lora.py @@ -59,11 +59,11 @@ def __init__( self.lora_rank = lora_rank self.lora_alpha = lora_alpha self.lora_dropout = 
get_dropout(lora_dropout) - self.tk_delta_lora_a = Parameter( + self.mindpet_delta_lora_a = Parameter( initializer(lora_a_init, [lora_rank, in_channels], param_init_type), - name='tk_delta_lora_A') - self.tk_delta_lora_b = Parameter(initializer(lora_b_init, [out_channels, lora_rank], param_init_type), - name='tk_delta_lora_B') + name='mindpet_delta_lora_A') + self.mindpet_delta_lora_b = Parameter(initializer(lora_b_init, [out_channels, lora_rank], param_init_type), + name='mindpet_delta_lora_B') self.scaling = self.lora_alpha / self.lora_rank # Calculation utils @@ -80,8 +80,8 @@ def construct(self, input_tensor): ori_dtype = F.dtype(input_tensor) input_tensor = self.cast(input_tensor, self.dtype) weight = self.cast(self.weight, self.dtype) - lora_a = self.cast(self.tk_delta_lora_a, self.dtype) - lora_b = self.cast(self.tk_delta_lora_b, self.dtype) + lora_a = self.cast(self.mindpet_delta_lora_a, self.dtype) + lora_b = self.cast(self.mindpet_delta_lora_b, self.dtype) scaling = self.cast(self.scaling, self.dtype) # Shape operations diff --git a/mindpet/delta/low_rank_adapter.py b/mindpet/delta/low_rank_adapter.py old mode 100644 new mode 100755 index a42056b..0beaf9c --- a/mindpet/delta/low_rank_adapter.py +++ b/mindpet/delta/low_rank_adapter.py @@ -64,12 +64,12 @@ def __init__(self, self.out_channels = out_channels self.rank = rank self.weight_init = weight_init - self.tk_delta_low_rank_adapter_weight_left = \ + self.mindpet_delta_low_rank_adapter_weight_left = \ Parameter(initializer(self.weight_init, [in_channels, rank], param_init_type), - name="tk_delta_low_rank_adapter_weight_left") - self.tk_delta_low_rank_adapter_weight_right = \ + name="mindpet_delta_low_rank_adapter_weight_left") + self.mindpet_delta_low_rank_adapter_weight_right = \ Parameter(initializer(self.weight_init, [rank, out_channels], param_init_type), - name="tk_delta_low_rank_adapter_weight_right") + name="mindpet_delta_low_rank_adapter_weight_right") self.has_bias = has_bias self.bias = None @@ 
-99,8 +99,8 @@ def construct(self, input_tensor): input_tensor = self.reshape(input_tensor, (-1, x_shape[-1])) # compute weight - weight = self.matmul_weight(self.cast(self.tk_delta_low_rank_adapter_weight_left, self.compt_dtype), - self.cast(self.tk_delta_low_rank_adapter_weight_right, self.compt_dtype)) + weight = self.matmul_weight(self.cast(self.mindpet_delta_low_rank_adapter_weight_left, self.compt_dtype), + self.cast(self.mindpet_delta_low_rank_adapter_weight_right, self.compt_dtype)) input_tensor = self.cast(input_tensor, self.compt_dtype) input_tensor = self.matmul_input(input_tensor, weight) @@ -179,15 +179,15 @@ def __init__( self.bottleneck_size = hidden_size // reduction_factor self.non_linearity = non_linearity - self.tk_delta_low_rank_adapter_down_sampler = LowRankLinear(in_channels=hidden_size, + self.mindpet_delta_low_rank_adapter_down_sampler = LowRankLinear(in_channels=hidden_size, out_channels=self.bottleneck_size, rank=low_rank_size, weight_init=low_rank_w_init, param_init_type=param_init_type, compute_dtype=compute_dtype) - self.tk_delta_low_rank_adapter_non_linear = get_activation( + self.mindpet_delta_low_rank_adapter_non_linear = get_activation( non_linearity) - self.tk_delta_low_rank_adapter_up_sampler = LowRankLinear(in_channels=self.bottleneck_size, + self.mindpet_delta_low_rank_adapter_up_sampler = LowRankLinear(in_channels=self.bottleneck_size, out_channels=hidden_size, rank=low_rank_size, weight_init=low_rank_w_init, @@ -205,11 +205,11 @@ def construct(self, input_tensor): input_tensor = P.Reshape()(input_tensor, (-1, x_shape[-1])) # calculate adapter_out - adapter_down_sampler_output = self.tk_delta_low_rank_adapter_down_sampler( + adapter_down_sampler_output = self.mindpet_delta_low_rank_adapter_down_sampler( input_tensor) - adapter_non_linear_output = self.tk_delta_low_rank_adapter_non_linear( + adapter_non_linear_output = self.mindpet_delta_low_rank_adapter_non_linear( adapter_down_sampler_output) - adapter_output = 
self.tk_delta_low_rank_adapter_up_sampler( + adapter_output = self.mindpet_delta_low_rank_adapter_up_sampler( adapter_non_linear_output) # residual connection, add input and adapter_output @@ -253,30 +253,30 @@ def shard(self, strategy_matmul_down_sampler_weight=None, strategy_residual_add (tuple): The strategy for the residual_add. """ try: - self.tk_delta_low_rank_adapter_down_sampler.shard( + self.mindpet_delta_low_rank_adapter_down_sampler.shard( strategy_matmul_down_sampler_weight, strategy_matmul_down_sampler_input, strategy_bias_down_sampler) - self.tk_delta_low_rank_adapter_up_sampler.shard( + self.mindpet_delta_low_rank_adapter_up_sampler.shard( strategy_matmul_up_sampler_weight, strategy_matmul_up_sampler_input, strategy_bias_up_sampler) # some operations has many primitives, need to manually set the shard if self.non_linearity.lower() == "leakyrelu": - self.tk_delta_low_rank_adapter_non_linear.select_op.shard( + self.mindpet_delta_low_rank_adapter_non_linear.select_op.shard( (strategy_non_linearity[0], strategy_non_linearity[0])) elif self.non_linearity.lower() == "logsigmoid": - self.tk_delta_low_rank_adapter_non_linear.mul.shard( + self.mindpet_delta_low_rank_adapter_non_linear.mul.shard( (strategy_non_linearity[0], ())) - self.tk_delta_low_rank_adapter_non_linear.exp.shard( + self.mindpet_delta_low_rank_adapter_non_linear.exp.shard( strategy_non_linearity) - self.tk_delta_low_rank_adapter_non_linear.add.shard( + self.mindpet_delta_low_rank_adapter_non_linear.add.shard( (strategy_non_linearity[0], ())) - self.tk_delta_low_rank_adapter_non_linear.rec.shard( + self.mindpet_delta_low_rank_adapter_non_linear.rec.shard( strategy_non_linearity) - self.tk_delta_low_rank_adapter_non_linear.log.shard( + self.mindpet_delta_low_rank_adapter_non_linear.log.shard( strategy_non_linearity) elif self.non_linearity.lower() == "logsoftmax": raise ValueError("The 'LogSoftmax' function is not supported in semi auto parallel " "or auto parallel mode.") else: - 
getattr(self.tk_delta_low_rank_adapter_non_linear, + getattr(self.mindpet_delta_low_rank_adapter_non_linear, self.non_linearity).shard(strategy_non_linearity) self.residual_add.shard(strategy_residual_add) @@ -372,7 +372,7 @@ def __init__(self, bias_init=bias_init, has_bias=has_bias, activation=activation) - self.tk_delta_low_rank_adapter = LowRankAdapterLayer(hidden_size=out_channels, + self.mindpet_delta_low_rank_adapter = LowRankAdapterLayer(hidden_size=out_channels, reduction_factor=reduction_factor, low_rank_size=low_rank_size, low_rank_w_init=low_rank_w_init, @@ -404,7 +404,7 @@ def construct(self, input_tensor): input_tensor = self.activation(input_tensor) # calculate low_rank_adapter_out - input_tensor = self.tk_delta_low_rank_adapter(input_tensor) + input_tensor = self.mindpet_delta_low_rank_adapter(input_tensor) # recover the previous outshape and dtype out_shape = x_shape[:-1] + (-1,) @@ -472,7 +472,7 @@ def shard(self, strategy_matmul_org=None, strategy_activation_org) # set low_rank_adapter strategy - self.tk_delta_low_rank_adapter.shard(strategy_matmul_down_sampler_weight, + self.mindpet_delta_low_rank_adapter.shard(strategy_matmul_down_sampler_weight, strategy_matmul_down_sampler_input, strategy_bias_down_sampler, strategy_non_linearity, diff --git a/mindpet/delta/prefix_layer.py b/mindpet/delta/prefix_layer.py old mode 100644 new mode 100755 index aa279bf..e007e26 --- a/mindpet/delta/prefix_layer.py +++ b/mindpet/delta/prefix_layer.py @@ -62,9 +62,9 @@ def __init__(self, def __define_network(self) -> None: """the network structure of prefix""" self.input_tokens = ms.Parameter(ms.numpy.arange(0, self.prefix_token_num, 1), - name="tk_delta_prefixtuning_input_tokens", requires_grad=False) - self.tk_delta_prefixtuning_wte = nn.Embedding(self.prefix_token_num, self.embed_dim) - self.tk_delta_prefixtuning_control_trans = nn.SequentialCell( + name="mindpet_delta_prefixtuning_input_tokens", requires_grad=False) + self.mindpet_delta_prefixtuning_wte = 
nn.Embedding(self.prefix_token_num, self.embed_dim) + self.mindpet_delta_prefixtuning_control_trans = nn.SequentialCell( nn.Dense(self.embed_dim, self.mid_dim), nn.Tanh(), nn.Dense(self.mid_dim, self.hidden_dim) @@ -73,8 +73,8 @@ def __define_network(self) -> None: def __allocate_parameter(self): """the value of prefix matrix""" input_tokens = self.input_tokens - temp_control = self.tk_delta_prefixtuning_wte(input_tokens) - past_key_values = self.tk_delta_prefixtuning_control_trans(temp_control) + temp_control = self.mindpet_delta_prefixtuning_wte(input_tokens) + past_key_values = self.mindpet_delta_prefixtuning_control_trans(temp_control) seq_len, _ = past_key_values.shape past_key_values = past_key_values.view(seq_len, -1, self.hidden_dim) past_key_values = self.dropout(past_key_values) diff --git a/mindpet/delta/r_drop.py b/mindpet/delta/r_drop.py old mode 100644 new mode 100755 diff --git a/mindpet/graph/__init__.py b/mindpet/graph/__init__.py old mode 100644 new mode 100755 index 3ca06e0..696af60 --- a/mindpet/graph/__init__.py +++ b/mindpet/graph/__init__.py @@ -1,7 +1,10 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +""" +This module contains utility functions for freezing modules in the MindPet framework. +Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
+""" from mindpet.graph.freeze_utils import freeze_modules, freeze_delta, freeze_from_config from mindpet.graph.ckpt_util import TrainableParamsCheckPoint diff --git a/mindpet/graph/ckpt_util.py b/mindpet/graph/ckpt_util.py old mode 100644 new mode 100755 index 334f5dd..6732050 --- a/mindpet/graph/ckpt_util.py +++ b/mindpet/graph/ckpt_util.py @@ -9,7 +9,7 @@ import time from collections import OrderedDict -import mindspore.nn as nn +from mindspore import nn from mindspore import context, Tensor from mindspore.train.callback._callback import set_cur_net from mindspore.train.callback import ModelCheckpoint @@ -21,6 +21,7 @@ class TrainableParamsCheckPoint(ModelCheckpoint): + """TrainableParamsCheckPoint class""" def __init__(self, directory, prefix="DELTA_CKP", config=None): """ Callback初始化 @@ -39,10 +40,10 @@ def __init__(self, directory, prefix="DELTA_CKP", config=None): self._last_triggered_step = 0 self._latest_ckpt_file_name = "" self._cur_time_for_keep = time.time() - super(TrainableParamsCheckPoint, self).__init__(prefix, directory, config) + super().__init__(prefix, directory, config) def set_train_info(self, cb_params, cur_step_num): - # 保持ckpt文件始终小于等于配置的ckpt文件最大数 + """保持ckpt文件始终小于等于配置的ckpt文件最大数""" if self._config.keep_checkpoint_max and \ 0 < self._config.keep_checkpoint_max <= self._manager.ckpoint_num: self._manager.remove_oldest_ckpoint_file() @@ -63,6 +64,7 @@ def set_train_info(self, cb_params, cur_step_num): def trans_network(self, network): """从network中选取可训练的参数,仅保存这部分参数""" + parameter_layout_dict = network.parameter_layout_dict network.init_parameters_data() param_dict = OrderedDict() for param in network.trainable_params(): @@ -72,7 +74,7 @@ def trans_network(self, network): each = {"name": param_name} param_data = Tensor(param.data.asnumpy()) if param_name in network.parameter_layout_dict: - param_data = _get_merged_param_data(network, param_name, param_data, + param_data = _get_merged_param_data(network, parameter_layout_dict, param_name, 
param_data, self._config.integrated_save) each["data"] = param_data param_list.append(each) @@ -130,4 +132,3 @@ def _save_ckpt(self, cb_params, force_to_save=False): self._latest_ckpt_file_name = cur_file logger.info("Save checkpoint successfully.") - diff --git a/mindpet/graph/freeze_utils.py b/mindpet/graph/freeze_utils.py old mode 100644 new mode 100755 index 667d80b..35ef5e5 --- a/mindpet/graph/freeze_utils.py +++ b/mindpet/graph/freeze_utils.py @@ -8,7 +8,7 @@ from typing import Optional, List from fnmatch import fnmatch -import mindspore.nn as nn +from mindspore import nn from mindpet.log.log import logger from mindpet.utils.constants import DELTA_LIST @@ -92,7 +92,7 @@ def freeze_delta(model: nn.Cell, try: freeze_modules(model, include, exclude) except Exception as ex: - raise Exception(f"Exception occurred when freeze model for delta, error message: {str(ex)}") from ex + raise UnexpectedError(f"Exception occurred when freeze model for delta, error message: {str(ex)}") from ex logger.info("End to freeze model for delta.") @@ -147,7 +147,7 @@ def _freeze_for_mode(model: nn.Cell, mode: str) -> None: :param model: 需要冻结的模型实例,必填。 """ - delta_name = '*tk_delta_' + mode + '*' + delta_name = '*mindpet_delta_' + mode + '*' if mode == 'bitfit': delta_name = '*bias' freeze_modules(model, include=['*'], exclude=[delta_name]) diff --git a/mindpet/log/__init__.py b/mindpet/log/__init__.py old mode 100644 new mode 100755 index 386cb80..1796233 --- a/mindpet/log/__init__.py +++ b/mindpet/log/__init__.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +""" +Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
+""" from mindpet.log.log import get_logger, logger, operation_logger_without_std, operation_logger, logger_without_std diff --git a/mindpet/log/log.py b/mindpet/log/log.py old mode 100644 new mode 100755 index 15b3130..4a891b9 --- a/mindpet/log/log.py +++ b/mindpet/log/log.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""log module.""" import logging import logging.config @@ -10,9 +11,6 @@ from typing import Dict, List, Tuple, Union import traceback -from mindpet.utils.constants import DEFAULT_MAX_LOG_FILE_NUM, DEFAULT_MAX_LOG_FILE_SIZE, ABNORMAL_EXIT_CODE -from mindpet.utils.exceptions import MakeDirError, UnsupportedPlatformError, PathOwnerError, PathModeError - try: from concurrent_log_handler import ConcurrentRotatingFileHandler as RFHandler except ImportError: @@ -20,6 +18,9 @@ from mindpet.log.log_utils import check_list, const, convert_nodes_devices_input, create_dirs, \ generate_rank_list, get_num_nodes_devices, get_rank_info, log_args_black_list_characters_replace, check_link_path +from mindpet.utils.constants import DEFAULT_MAX_LOG_FILE_NUM, DEFAULT_MAX_LOG_FILE_SIZE, ABNORMAL_EXIT_CODE +from mindpet.utils.exceptions import MakeDirError, UnsupportedPlatformError, PathOwnerError, PathModeError + logger_list = {} stream_handler_list = {} @@ -102,7 +103,7 @@ def validate_level(var_name: str, var): if not isinstance(var, str): raise TypeError(f'The format of {var_name} must be of type str.') if var not in const.level: - raise ValueError('{}={} needs to be in {}'.format(var_name, var, const.level)) + raise ValueError(f'{var_name}={var} needs to be in {const.level}') def validate_std_input_format(to_std: bool, stdout_nodes: Union[List, Tuple, None], @@ -121,12 +122,12 @@ def validate_file_input_format(file_level: Union[List, Tuple], file_save_dir: st file_name: Union[List, Tuple]): """验证日志内容落盘的参数""" - if not (isinstance(file_level, tuple) or isinstance(file_level, 
list)): + if not isinstance(file_level, (tuple, list)): raise TypeError('The value of file_level should be list or a tuple.') for level in file_level: validate_level('level in file_level', level) - if not (isinstance(file_name, (tuple, list))): + if not isinstance(file_name, (tuple, list)): raise TypeError('The value of file_name should be a list or a tuple.') if not len(file_level) == len(file_name): @@ -232,17 +233,12 @@ class CustomizedRotatingFileHandler(RFHandler): 2.实现日志文件的权限控制 3.日志文件的绕接 """ - - def __init__(self, *args, **kwargs): - """重写init,增加日志文件的权限控制""" - super(CustomizedRotatingFileHandler, self).__init__(*args, **kwargs) - def doRollover(self) -> None: """重写doRoller,实现绕接,并且对日志文件的权限进行控制""" - super(CustomizedRotatingFileHandler, self).doRollover() + super().doRollover() if self.backupCount > 0: for i in range(self.backupCount - 1, 0, -1): - file_name = self.rotation_filename('%s.%d' % (self.baseFilename, i)) + file_name = self.rotation_filename(f'{self.baseFilename}.{i}') if os.path.exists(file_name) and not check_link_path(str(file_name)): os.chmod(file_name, MODE_440) @@ -252,7 +248,7 @@ def emit(self, record) -> None: if tmp_out is not None and len(tmp_out) > LOG_RECORD_MAX_LEN: record.msg = tmp_out[:LOG_RECORD_MAX_LEN] record.args = () - super(CustomizedRotatingFileHandler, self).emit(record) + super().emit(record) if not check_link_path(str(self.baseFilename)) and os.path.exists(self.baseFilename): os.chmod(self.baseFilename, MODE_640) @@ -260,7 +256,7 @@ def format(self, record): return log_format(self, record) def _open_lockfile(self): - super(CustomizedRotatingFileHandler, self)._open_lockfile() + super()._open_lockfile() self._do_chmod(MODE_640) def _do_chmod(self, mode): @@ -299,8 +295,9 @@ def get_file_handler_list(file_level: Union[List, Tuple], file_path: Union[List, class MxLogger(logging.Logger): + """Define Mx logger""" def __init__(self, name, **kwargs): - super(MxLogger, self).__init__(name) + super().__init__(name) self.source = None 
self.method = None self.propagate = False @@ -331,15 +328,16 @@ def makeRecord( ): """重写makeRecord方法,日志内容中增加发起端标识和pid信息""" if extra is None: - extra = dict() + extra = {} extra['source'] = self.source if hasattr(self, 'source') else UNKNOWN extra['pid'] = self.pid if hasattr(self, 'pid') else UNKNOWN args = log_args_black_list_characters_replace(args) - return super(MxLogger, self).makeRecord(name, level=level, fn=fn, lno=lno, msg=msg, args=args, + return super().makeRecord(name, level=level, fn=fn, lno=lno, msg=msg, args=args, exc_info=exc_info, func=func, extra=extra, sinfo=sinfo) def set_logger(self): + """设置logger输出格式以及日志输出路径""" if const.local_default_log_file_dir is None: const.get_local_default_log_file_dir() file_save_dir = os.path.expanduser(const.local_default_log_file_dir) @@ -361,11 +359,13 @@ def set_logger(self): self.setLevel(_convert_level('DEBUG')) self.set_flag = True + # pylint: disable=W0221 def _log(self, level, msg, args, **kwargs): + """日志接口""" if not self.set_flag: try: self.set_logger() @@ -377,10 +377,11 @@ def _log(self, else: logging.error(traceback.format_exc()) sys.exit(ABNORMAL_EXIT_CODE) + # pylint: disable=W0703 except Exception: logging.error(traceback.format_exc()) sys.exit(ABNORMAL_EXIT_CODE) - super(MxLogger, self)._log(level, msg, args, **kwargs) + super()._log(level, msg, args, **kwargs) def set_logger_property(source): diff --git a/mindpet/log/log_utils.py b/mindpet/log/log_utils.py old mode 100644 new mode 100755 index f217c6b..e8da017 --- a/mindpet/log/log_utils.py +++ b/mindpet/log/log_utils.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +"""Log APIs""" import os import stat import platform from typing import Dict, List, Tuple, Union -from mindpet.utils.exceptions import MakeDirError, UnsupportedPlatformError, PathOwnerError, PathModeError +from mindpet.utils.exceptions import MakeDirError, UnsupportedPlatformError, PathOwnerError from mindpet.utils.constants import EMPTY_STRING LOG_CONTENT_BLACK_LIST = ('\r', '\n', '\t', '\f', '\v', '\b', '\u000A', '\u000D', '\u000C', @@ -130,7 +130,7 @@ def get_num_nodes_devices(rank_size: int) -> Tuple[int, int]: def log_args_black_list_characters_replace(args): """日志内容参数黑名单校验""" - res = list() + res = [] if args is None or len(args) == 0: return args if isinstance(args, (list, tuple)): @@ -175,12 +175,6 @@ def home_path_check(path): if path_owner != current_login_user: raise PathOwnerError('The owner of $HOME path is not current login user.') - mode = oct(os.stat(path).st_mode)[-3:] - ret = (int(mode[0]), int(mode[1]), int(mode[2])) - if stat.S_IWOTH & ret[2] != 0: - raise PathModeError('$HOME path may have risk of rights escalation, ' - 'other users should not have permission to write.') - if file_name is None or file_name == '': raise ValueError('[file_name] is None or empty.') @@ -226,6 +220,7 @@ def home_path_check(path): def specific_path_config_legality_check(specific_path_config): + """specific_path_config_legality_check""" specific_path = specific_path_config.get('path') if specific_path is None: @@ -240,6 +235,7 @@ def specific_path_config_legality_check(specific_path_config): class Const: + """Const""" def __init__(self): self.level = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL') self.local_default_log_file_dir = None diff --git a/mindpet/tk_main.py b/mindpet/mindpet_main.py old mode 100644 new mode 100755 similarity index 95% rename from mindpet/tk_main.py rename to mindpet/mindpet_main.py index a474442..d0f3d70 --- a/mindpet/tk_main.py +++ b/mindpet/mindpet_main.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © 
Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""MindPet Tuning Main.""" import sys import click @@ -23,6 +24,8 @@ def cli_wrapper(): try: cli(standalone_mode=False) + # pylint: disable=W0719 + # pylint: disable=W0703 except Exception as ex: handle_exception_log(ex) sys.exit(ABNORMAL_EXIT_CODE) diff --git a/mindpet/tk_sdk.py b/mindpet/mindpet_sdk.py old mode 100644 new mode 100755 similarity index 86% rename from mindpet/tk_sdk.py rename to mindpet/mindpet_sdk.py index 9fd56ef..7c2b1ce --- a/mindpet/tk_sdk.py +++ b/mindpet/mindpet_sdk.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""MindPet SDK Module.""" -from mindpet.tk_main import cli +from mindpet.mindpet_main import cli from mindpet.log.log import logger, set_logger_property from mindpet.utils.task_utils import handle_exception_log from mindpet.utils.entrance_monitor import entrance_monitor -from mindpet.utils.constants import ENTRANCE_TYPE, EMPTY_STRING, ARG_NAMES, TK_SDK_INTERFACE_NAMES +from mindpet.utils.constants import ENTRANCE_TYPE, EMPTY_STRING, ARG_NAMES, MINDPET_SDK_INTERFACE_NAMES entrance_monitor.set_value(ENTRANCE_TYPE, 'SDK') set_logger_property('SDK') @@ -55,13 +56,15 @@ def start_by_task_type(args, kwargs, task_type, ret_err_msg): :param kwargs: 参数kwargs :return: 任务执行结果 """ - if task_type not in TK_SDK_INTERFACE_NAMES: + if task_type not in MINDPET_SDK_INTERFACE_NAMES: logger.error('Invalid task_type for starting task.') return ret_err_msg try: commands = commands_generator(task_type, args, kwargs) return cli.main(commands, standalone_mode=False) + # pylint: disable=W0719 + # pylint: disable=W0703 except Exception as ex: handle_exception_log(ex) return ret_err_msg @@ -82,18 +85,18 @@ def commands_generator(header, args, kwargs): args_length = min(len(args), len(ARG_NAMES.get(header))) for idx in range(args_length): - output.append('--{}'.format(ARG_NAMES.get(header)[idx])) - 
output.append('{}'.format(args[idx])) + output.append(f"--{(ARG_NAMES.get(header)[idx])}") + output.append(f"{(args[idx])}") for key_item, val_item in kwargs.items(): # 安静模式仅允许CLI场景使用, SDK场景不允许配置该值, 给予错误警告 if str(key_item) == 'quiet': raise ValueError('Param [quiet] is not supported by SDK.') # SDK场景参数值传None, 等价于CLI场景未传参数, 不应拼接到命令中 - elif val_item is None: + if val_item is None: continue - output.append('--{}'.format(key_item)) - output.append('{}'.format(val_item)) + output.append(f"--{key_item}") + output.append(f"{val_item}") return [header] + output diff --git a/mindpet/security/__init__.py b/mindpet/security/__init__.py old mode 100644 new mode 100755 index dfcf6a1..8073bb8 --- a/mindpet/security/__init__.py +++ b/mindpet/security/__init__.py @@ -1,3 +1,4 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. \ No newline at end of file +# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""security module.""" diff --git a/mindpet/security/param_check/__init__.py b/mindpet/security/param_check/__init__.py old mode 100644 new mode 100755 index dfcf6a1..3c6ec77 --- a/mindpet/security/param_check/__init__.py +++ b/mindpet/security/param_check/__init__.py @@ -1,3 +1,4 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. \ No newline at end of file +# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""param check module.""" diff --git a/mindpet/security/param_check/base_check.py b/mindpet/security/param_check/base_check.py old mode 100644 new mode 100755 index 1979b2f..0817b86 --- a/mindpet/security/param_check/base_check.py +++ b/mindpet/security/param_check/base_check.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +"""base check module.""" from mindpet.security.param_check.option_check_utils import PathRightEscalationCheck, FileSizeCheck class BaseCheckParam: + """base param check class.""" def __init__(self, mode, path_including_file, force_quit, quiet): """ 入参校验参数 @@ -21,6 +22,7 @@ def __init__(self, mode, path_including_file, force_quit, quiet): class BaseCheck: + """base check class.""" def __init__(self, option_name, option_value): """ 基础校验项构造方法 diff --git a/mindpet/security/param_check/model_config_params_check_util.py b/mindpet/security/param_check/model_config_params_check_util.py old mode 100644 new mode 100755 index 8ac3f38..456a182 --- a/mindpet/security/param_check/model_config_params_check_util.py +++ b/mindpet/security/param_check/model_config_params_check_util.py @@ -1,14 +1,18 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Model Config check module.""" from mindpet.log.log import logger from mindpet.utils.exceptions import ModelConfigParamsInfoError from mindpet.utils.constants import MODEL_CONFIG_PARAMS_CHARACTER_BLACKLIST, MODEL_CONFIG_LEN_LIMIT, \ - TK_DEFINED_PARAM_NAME, INVALID_CUSTOM_PARAM_KEY_PREFIX, INVALID_CUSTOM_PARAM_VAL_PREFIX + MINDPET_DEFINED_PARAM_NAME, INVALID_CUSTOM_PARAM_KEY_PREFIX, INVALID_CUSTOM_PARAM_VAL_PREFIX class ModelConfigParamsChecker: + """ + Define model config params checker. 
+ """ def __init__(self, task_object, params_config=None): """ model_config配置文件内容校验构造方法 @@ -45,7 +49,8 @@ def check(self): param_key, param_val = str(param_key), str(param_val) # 接口预定义参数与params的key重复校验 - if param_key in TK_DEFINED_PARAM_NAME: + if param_key in MINDPET_DEFINED_PARAM_NAME: + # pylint: disable=W1203 logger.warning( f'Find duplicate key [{param_key}] from config in [params] part in model config file.') continue diff --git a/mindpet/security/param_check/option_check_utils.py b/mindpet/security/param_check/option_check_utils.py old mode 100644 new mode 100755 index 2d4d8f3..f2950d2 --- a/mindpet/security/param_check/option_check_utils.py +++ b/mindpet/security/param_check/option_check_utils.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""command option check utils.""" import os from mindpet.log.log import logger @@ -27,6 +28,7 @@ def get_real_path(path): class OptionBase: + """Define command option.""" def __init__(self, option_name, option_value): """ 构造方法 @@ -58,6 +60,7 @@ def _is_dir_path(self): class PathLengthCheckParam: + """Define file path length check params.""" def __init__(self, path_min_limit, path_max_limit, file_min_limit, file_max_limit): """ 路径长度校验项参数构造方法 @@ -73,6 +76,7 @@ def __init__(self, path_min_limit, path_max_limit, file_min_limit, file_max_limi class PathContentCheckParam: + """Define file path check params.""" def __init__(self, base_whitelist_mode, extra_whitelist): """ 路径内容校验项参数构造方法 @@ -84,6 +88,7 @@ def __init__(self, base_whitelist_mode, extra_whitelist): class InteractionByEntrance(OptionBase): + """Define interaction.""" def __init__(self, option_name, option_value, notice_msg, notice_accept_msg, exception_type): """ 根据使用入口(CLI/SDK)执行不同的交互逻辑构造方法 @@ -93,7 +98,7 @@ def __init__(self, option_name, option_value, notice_msg, notice_accept_msg, exc :param notice_accept_msg: 用户接受通知消息 :param exception_type: 异常类型 """ - 
super(InteractionByEntrance, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) if notice_msg is None or notice_msg == EMPTY_STRING: raise ValueError('Param [notice_msg] is None or empty.') @@ -168,18 +173,18 @@ def _interact_by_sdk(self, force_quit): """ if force_quit: raise self.exception_type(self.notice_msg) - else: - logger.warning(self.notice_msg) + logger.warning(self.notice_msg) class PathContentBlacklistCharactersCheck(OptionBase): + """Define path black list check""" def __init__(self, option_name, option_value): """ 路径内容黑名单字符校验构造方法 :param option_name: 参数名称 :param option_value: 参数值 """ - super(PathContentBlacklistCharactersCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) self._check() def _check(self): @@ -194,13 +199,14 @@ def _check(self): class AbsolutePathCheck(OptionBase): + """Define absolute path check.""" def __init__(self, option_name, option_value): """ 绝对路径校验构造方法 :param option_name: 参数名称 :param option_value: 参数值 """ - super(AbsolutePathCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) self._check() def _check(self): @@ -212,13 +218,14 @@ def _check(self): class PathExistCheck(OptionBase): + """Define path exist check.""" def __init__(self, option_name, option_value): """ 路径存在校验构造方法 :param option_name: 参数名称 :param option_value: 参数值 """ - super(PathExistCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) self._check() def _check(self): @@ -230,13 +237,14 @@ def _check(self): class LinkPathCheck(OptionBase): + """Define link path exist check.""" def __init__(self, option_name, option_value): """ 链接路径校验构造方法 :param option_name: 参数名称 :param option_value: 参数值 """ - super(LinkPathCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) self._check() def _check(self): @@ -248,6 +256,7 @@ def _check(self): class PathContentLengthCheck(OptionBase): + 
"""Define path content option length check.""" def __init__(self, option_name, option_value, path_min_limit, path_max_limit, file_min_limit, file_max_limit): """ 路径内容长度校验构造方法 @@ -270,7 +279,7 @@ def __init__(self, option_name, option_value, path_min_limit, path_max_limit, fi if file_max_limit is not None and not isinstance(file_max_limit, int): raise TypeError('Invalid type for param [file_max_limit] when initializing PathContentLengthCheck.') - super(PathContentLengthCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) self._check(path_min_limit, path_max_limit, file_min_limit, file_max_limit) def _check(self, path_min_limit, path_max_limit, file_min_limit, file_max_limit): @@ -349,6 +358,7 @@ def _file_length_check_item(self, file_min_limit, file_max_limit): class PathContentCharacterCheck(OptionBase): + """Define path content option character check.""" def __init__(self, option_name, option_value, base_whitelist_mode, extra_whitelist): """ 路径内容校验构造方法 @@ -357,7 +367,7 @@ def __init__(self, option_name, option_value, base_whitelist_mode, extra_whiteli :param base_whitelist_mode: 基础白名单模式 :param extra_whitelist: 额外白名单列表 """ - super(PathContentCharacterCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) if base_whitelist_mode is None or base_whitelist_mode == EMPTY_STRING: raise ValueError('Param [base_whitelist_mode] is None or empty.') @@ -377,8 +387,8 @@ def get_base_whitelist_by_mode(base_whitelist_mode): :param base_whitelist_mode: 模式类型,包括全大写/全小写/数字/大小写字母/大小写字母和数字 :return: 基础白名单字符列表 """ - if base_whitelist_mode not in BASE_WHITELIST_CHARACTERS.keys(): - raise ValueError('Invalid param [base_whitelist_mode], only support %s', + if base_whitelist_mode not in BASE_WHITELIST_CHARACTERS: + raise ValueError('Invalid param [base_whitelist_mode], only support %s' % list(BASE_WHITELIST_CHARACTERS.keys())) return BASE_WHITELIST_CHARACTERS.get(base_whitelist_mode) @@ -399,6 +409,7 @@ def 
_check(self, base_whitelist_mode, extra_whitelist): class PathGranularityCheck(OptionBase): + """Define path option granular check.""" def __init__(self, option_name, option_value, path_including_file): """ 路径粒度校验(文件夹/文件粒度)构造方法 @@ -406,7 +417,7 @@ def __init__(self, option_name, option_value, path_including_file): :param option_value: 参数值 :param path_including_file: 路径是否包含文件名, 传None表示同时兼容 """ - super(PathGranularityCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) if path_including_file is not None and not isinstance(path_including_file, bool): raise TypeError('Invalid type for param [path_including_file] when initializing PathGranularityCheck.') @@ -425,11 +436,11 @@ def _check(self): if not status: raise PathGranularityError( - 'Param [{}] should be a {} path.'.format(self.option_name, - 'file' if self.path_including_file else 'dir')) + f"Param [{self.option_name}] should be a {'file' if self.path_including_file else 'dir'} path.") class PathRightEscalationCheck(OptionBase): + """Define path right option escalation check.""" def __init__(self, option_name, option_value, mode, force_quit, quiet): """ 路径权限提升校验构造方法 @@ -439,7 +450,7 @@ def __init__(self, option_name, option_value, mode, force_quit, quiet): :param force_quit: 异常是否强制退出 :param quiet: 安静模式 """ - super(PathRightEscalationCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) self._check(mode, force_quit, quiet) @staticmethod @@ -541,6 +552,7 @@ def _get_path_mode(self): class FileSizeCheck(OptionBase): + """Define file size check.""" def __init__(self, option_name, option_value, path_including_file): """ 文件大小校验构造方法 @@ -548,7 +560,7 @@ def __init__(self, option_name, option_value, path_including_file): :param option_value: 参数值 :param path_including_file: 路径是否包含文件名 """ - super(FileSizeCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) if path_including_file is None: raise 
ValueError('Param [path_including_file] is None.') @@ -569,6 +581,7 @@ def _check(self, path_including_file): class DiskFreeSpaceCheck(OptionBase): + """Define disk free space check.""" def __init__(self, option_name, option_value, free_space_limit, force_quit, quiet): """ 路径所在磁盘剩余空间校验构造方法 @@ -578,7 +591,7 @@ def __init__(self, option_name, option_value, free_space_limit, force_quit, quie :param force_quit: 异常是否强制退出 :param quiet: 安静模式 """ - super(DiskFreeSpaceCheck, self).__init__(option_name, option_value) + super().__init__(option_name, option_value) if free_space_limit is None: raise ValueError('Param [free_space_limit] is None.') @@ -601,6 +614,7 @@ def _check(self, force_quit, quiet): info = os.statvfs(self.option_value) free_size = info.f_bsize * info.f_bavail + # pylint: disable=W1203 logger.info( f'Disk where param {self.option_name} is located has {round(free_size / GB_SIZE, 2)} GB free space.') diff --git a/mindpet/task/__init__.py b/mindpet/task/__init__.py old mode 100644 new mode 100755 index dfcf6a1..443dc35 --- a/mindpet/task/__init__.py +++ b/mindpet/task/__init__.py @@ -1,3 +1,3 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. \ No newline at end of file +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.""" diff --git a/mindpet/task/evaluate_infer/__init__.py b/mindpet/task/evaluate_infer/__init__.py old mode 100644 new mode 100755 index dfcf6a1..443dc35 --- a/mindpet/task/evaluate_infer/__init__.py +++ b/mindpet/task/evaluate_infer/__init__.py @@ -1,3 +1,3 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. \ No newline at end of file +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. 
All rights reserved.""" diff --git a/mindpet/task/evaluate_infer/evaluate_infer_task.py b/mindpet/task/evaluate_infer/evaluate_infer_task.py old mode 100644 new mode 100755 index cee7f8a..6b10d54 --- a/mindpet/task/evaluate_infer/evaluate_infer_task.py +++ b/mindpet/task/evaluate_infer/evaluate_infer_task.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.""" import os import json @@ -19,6 +19,7 @@ class EvaluateInferTask: + """EvaluateInferTask class""" def __init__(self, task_type, *args, **kwargs): """ 评估/推理任务构造方法 @@ -40,29 +41,30 @@ def __init__(self, task_type, *args, **kwargs): self._process_param_and_command() except KeyboardInterrupt as ex: record_operation_and_service_error_log( - '{} task is terminated by current user, task has stopped and exited.'.format( - str(self.task_type).capitalize())) + f'{str(self.task_type).capitalize()} task is terminated by current user, task has stopped and exited.') raise ex except Exception as ex: - record_operation_and_service_error_log('{} failed.'.format(str(self.task_type).capitalize())) + record_operation_and_service_error_log(f'{str(self.task_type).capitalize()} failed.') raise ex + # pylint: disable=R1732 def start(self): """ 启动命令 :return: 评估/推理JSON形式结果 """ # 启动评估/推理任务 - record_operation_and_service_info_log('{} task is running.'.format(str(self.task_type).capitalize())) + record_operation_and_service_info_log( + f'{str(self.task_type).capitalize()} task is running.') try: process = subprocess.Popen(self.command, env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) except KeyboardInterrupt as ex: record_operation_and_service_error_log( - '{} task is terminated by current user, task has stopped and exited.'.format( - str(self.task_type).capitalize())) + f'{str(self.task_type).capitalize()} task is terminated by current user, 
task has stopped and exited.') raise ex + except Exception as ex: if ex is None or not str(ex): raise CreateProcessError(f'Exception occurred when creating {self.task_type} task process, ' @@ -76,8 +78,7 @@ def start(self): timeout=self.kwargs.get('timeout')) except KeyboardInterrupt as ex: record_operation_and_service_error_log( - '{} task is terminated by current user, task has stopped and exited.'.format( - str(self.task_type).capitalize())) + f'{str(self.task_type).capitalize()} task is terminated by current user, task has stopped and exited.') raise ex except Exception as ex: if ex is None or not str(ex): @@ -87,21 +88,23 @@ def start(self): f'error message: {str(ex)}.') from ex if rsp_code != 0: - operation_logger_without_std.error('{} failed.'.format(str(self.task_type).capitalize())) - raise TaskError('{} failed.'.format(str(self.task_type).capitalize())) + message = f'{str(self.task_type).capitalize()} failed.' + operation_logger_without_std.error(message) + raise TaskError(message) # 获取评估结果 result = self._get_task_result() if result.get('status') == 0: - record_operation_and_service_info_log('{} successfully.'.format(str(self.task_type).capitalize())) + record_operation_and_service_info_log(f'{str(self.task_type).capitalize()} successfully.') else: record_operation_and_service_warning_log( f'Completed {self.task_type} task, but failed to get {self.result_name} file.') - logger.warning(f'{result.get("error_message")}') + logger.warning(result.get("error_message")) return result + # pylint: disable=W0718 def _get_task_result(self): """ 读取并返回用户落盘的评估/推理结果 @@ -128,6 +131,7 @@ def _get_task_result(self): except (json.JSONDecodeError, TypeError): # 文件为空但文件大小不为0、格式错误 result["error_message"] = f'File {self.result_name} should follow JSON format.' 
+ # pylint: disable=W0703 except Exception as ex: result["error_message"] = f'An error occurred during reading {self.result_name}: {str(ex)}' else: @@ -139,6 +143,7 @@ def _get_task_result(self): return result + # pylint: disable=W0718 def _get_check_result_file_error_msg(self, result_path): """ 获取结果文件校验结果与对应error_message @@ -160,6 +165,7 @@ def _get_check_result_file_error_msg(self, result_path): error_message = f'Detect link path, reject reading file: {self.result_name}.' except ValueError: error_message = f'Invalid file: {self.result_name}.' + # pylint: disable=W0703 except Exception as ex: error_message = f'An error occurred during reading {self.result_name}: {str(ex)}' diff --git a/mindpet/task/evaluate_infer/result_file_check.py b/mindpet/task/evaluate_infer/result_file_check.py old mode 100644 new mode 100755 index 9a5e43d..82b6a79 --- a/mindpet/task/evaluate_infer/result_file_check.py +++ b/mindpet/task/evaluate_infer/result_file_check.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. 
All rights reserved.""" from mindpet.security.param_check.base_check import BaseCheckParam, BaseCheck from mindpet.security.param_check.option_check_utils import PathExistCheck, LinkPathCheck, get_real_path class ResultFileCheckParam(BaseCheckParam): + """ResultFileCheckParam class""" def __init__(self, path_content_check_param, mode, @@ -24,15 +25,9 @@ def __init__(self, self.path_content_check_param = path_content_check_param +# pylint: disable=W0246 class ResultFileCheck(BaseCheck): - def __init__(self, option_name, option_value): - """ - 任务结果文件(eval_result.json/infer_result.json)合法性校验构造方法 - :param option_name: 参数名称 - :param option_value: 参数值 - """ - super().__init__(option_name, option_value) - + """ResultFileCheck class""" def check(self, check_param): """ 任务结果文件(eval_result.json/infer_result.json)合法性校验 diff --git a/mindpet/task/finetune/__init__.py b/mindpet/task/finetune/__init__.py old mode 100644 new mode 100755 index dfcf6a1..33f0bbe --- a/mindpet/task/finetune/__init__.py +++ b/mindpet/task/finetune/__init__.py @@ -1,3 +1,3 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. \ No newline at end of file +# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. diff --git a/mindpet/task/finetune/finetune_options_check.py b/mindpet/task/finetune/finetune_options_check.py old mode 100644 new mode 100755 index 3d00cba..d4443c5 --- a/mindpet/task/finetune/finetune_options_check.py +++ b/mindpet/task/finetune/finetune_options_check.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. 
All rights reserved.""" from mindpet.utils.constants import GB_SIZE from mindpet.security.param_check.base_check import BaseCheckParam, BaseCheck @@ -10,6 +10,7 @@ class FinetuneOptionsCheckParam(BaseCheckParam): + """class FinetuneOptionsCheckParam class""" def __init__(self, path_length_check_param, path_content_check_param, @@ -32,6 +33,7 @@ def __init__(self, class FinetuneOptionsCheck(BaseCheck): + """FinetuneOptionsCheck class""" def __init__(self, option_name, option_value, disk_space_check=False): super().__init__(option_name, option_value) self.disk_space_check = disk_space_check @@ -51,6 +53,7 @@ def check(self, check_param): self._real_path_check_item(check_param) def _origin_path_check_item(self): + """_origin_path_check_item""" # 路径内容黑名单校验 PathContentBlacklistCharactersCheck(option_name=self.option_name, option_value=self.option_value) @@ -64,6 +67,7 @@ def _origin_path_check_item(self): LinkPathCheck(option_name=self.option_name, option_value=self.option_value) def _real_path_check_item(self, check_param): + """_real_path_check_item""" # 路径长度校验 PathContentLengthCheck(option_name=self.option_name, option_value=self.option_value, path_min_limit=check_param.path_length_check_param.path_min_limit, diff --git a/mindpet/task/finetune/finetune_task.py b/mindpet/task/finetune/finetune_task.py old mode 100644 new mode 100755 index 9d91802..1349aeb --- a/mindpet/task/finetune/finetune_task.py +++ b/mindpet/task/finetune/finetune_task.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. 
All rights reserved.""" import os import subprocess @@ -15,6 +15,7 @@ class FinetuneTask: + """FinetuneTask class""" def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs @@ -31,6 +32,7 @@ def __init__(self, *args, **kwargs): record_operation_and_service_error_log('Finetune failed.') raise ex + # pylint: disable=R1732 def start(self): """ 启动命令 @@ -47,8 +49,8 @@ def start(self): raise ex except Exception as ex: if ex is None or not str(ex): - raise CreateProcessError(f'Exception occurred when creating finetune task process, ' - f'no error message available.') from ex + raise CreateProcessError('Exception occurred when creating finetune task process, ' + 'no error message available.') from ex raise CreateProcessError(f'Exception occurred when creating finetune task process, ' f'error message: {str(ex)}.') from ex @@ -61,10 +63,10 @@ def start(self): raise ex except Exception as ex: if ex is None or not str(ex): - raise MonitorProcessRspError(f'Exception occurred when monitoring finetune task process, ' - f'no error message available.') from ex - raise MonitorProcessRspError(f'Exception occurred when monitoring finetune task process, ' - f'error message: {str(ex)}.') from ex + raise MonitorProcessRspError('Exception occurred when monitoring finetune task process, ' + 'no error message available.') from ex + raise MonitorProcessRspError('Exception occurred when monitoring finetune task process, ' + 'error message: {str(ex)}.') from ex if rsp_code != 0: operation_logger_without_std.error('Finetune failed.') diff --git a/mindpet/task/option_decorators.py b/mindpet/task/option_decorators.py old mode 100644 new mode 100755 index 1193a1c..fb2e079 --- a/mindpet/task/option_decorators.py +++ b/mindpet/task/option_decorators.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Copyright © Huawei Technologies Co., Ltd. 2022-2023. 
All rights reserved.""" from mindpet.task.options import DataPathOption, PretrainedModelPathOption, OutputPathOption, BootFilePathOption, \ ModelConfigPathOption, QuietOption, CkptPathOption, TimeoutOption diff --git a/mindpet/task/options/__init__.py b/mindpet/task/options/__init__.py old mode 100644 new mode 100755 index 6d2cfcb..c9b625d --- a/mindpet/task/options/__init__.py +++ b/mindpet/task/options/__init__.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""__init__""" from mindpet.task.options.data_path_option import DataPathOption from mindpet.task.options.pretrained_model_path_option import PretrainedModelPathOption diff --git a/mindpet/task/options/boot_file_path_option.py b/mindpet/task/options/boot_file_path_option.py old mode 100644 new mode 100755 index b2a5709..2f4f451 --- a/mindpet/task/options/boot_file_path_option.py +++ b/mindpet/task/options/boot_file_path_option.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +"""boot file path option""" import os import click from click.exceptions import MissingParameter, BadParameter @@ -11,6 +11,7 @@ class BootFilePathOption(click.core.Option): + """BootFilePathOption class""" def __init__(self): super().__init__( param_decls=('-bp', '--boot_file_path'), diff --git a/mindpet/task/options/ckpt_path_option.py b/mindpet/task/options/ckpt_path_option.py old mode 100644 new mode 100755 index 43da9e5..b66070f --- a/mindpet/task/options/ckpt_path_option.py +++ b/mindpet/task/options/ckpt_path_option.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +"""ckpt path option""" import click from click.exceptions import MissingParameter from mindpet.utils.constants import PATH_MODE_LIMIT @@ -10,6 +10,7 @@ class CkptPathOption(click.core.Option): + """CkptPathOption class""" def __init__(self): super().__init__( param_decls=('-cp', '--ckpt_path'), diff --git a/mindpet/task/options/data_path_option.py b/mindpet/task/options/data_path_option.py old mode 100644 new mode 100755 index fefb6fe..19e1dd6 --- a/mindpet/task/options/data_path_option.py +++ b/mindpet/task/options/data_path_option.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""data path option""" import click from click.exceptions import MissingParameter @@ -10,6 +11,7 @@ class DataPathOption(click.core.Option): + """DataPathOption class""" def __init__(self): super().__init__( param_decls=('-dp', '--data_path'), diff --git a/mindpet/task/options/model_config_path_option.py b/mindpet/task/options/model_config_path_option.py old mode 100644 new mode 100755 index f664fc7..c10baba --- a/mindpet/task/options/model_config_path_option.py +++ b/mindpet/task/options/model_config_path_option.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""model config path option""" import click from click.exceptions import BadParameter @@ -10,6 +11,7 @@ class ModelConfigPathOption(click.core.Option): + """ModelConfigPathOption class""" def __init__(self): super().__init__( param_decls=('-mc', '--model_config_path'), diff --git a/mindpet/task/options/output_path_option.py b/mindpet/task/options/output_path_option.py old mode 100644 new mode 100755 index 10555d7..a007cdf --- a/mindpet/task/options/output_path_option.py +++ b/mindpet/task/options/output_path_option.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. 
All rights reserved. +"""output path option""" import click from click.exceptions import MissingParameter @@ -10,6 +11,7 @@ class OutputPathOption(click.core.Option): + """OutputPathOption class""" def __init__(self): super().__init__( param_decls=('-op', '--output_path'), diff --git a/mindpet/task/options/path_check_param.py b/mindpet/task/options/path_check_param.py old mode 100644 new mode 100755 index 50e2df0..b43ecbd --- a/mindpet/task/options/path_check_param.py +++ b/mindpet/task/options/path_check_param.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""path check param""" from mindpet.utils.constants import SPACE_CHARACTER from mindpet.security.param_check.option_check_utils import PathLengthCheckParam, PathContentCheckParam diff --git a/mindpet/task/options/pretrained_model_path_option.py b/mindpet/task/options/pretrained_model_path_option.py old mode 100644 new mode 100755 index 1004e12..326b01e --- a/mindpet/task/options/pretrained_model_path_option.py +++ b/mindpet/task/options/pretrained_model_path_option.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""pretrained model path option""" import click from mindpet.utils.constants import PATH_MODE_LIMIT @@ -9,6 +10,7 @@ class PretrainedModelPathOption(click.core.Option): + """PretrainedModelPathOption class""" def __init__(self): super().__init__( param_decls=('-pm', '--pretrained_model_path'), diff --git a/mindpet/task/options/quiet_option.py b/mindpet/task/options/quiet_option.py old mode 100644 new mode 100755 index 406f258..3273f60 --- a/mindpet/task/options/quiet_option.py +++ b/mindpet/task/options/quiet_option.py @@ -1,12 +1,14 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
+"""quiet option""" import click from click.exceptions import BadParameter class QuietOption(click.core.Option): + """QuietOption class""" def __init__(self): super().__init__( param_decls=('-q', '--quiet'), @@ -16,6 +18,7 @@ def __init__(self): callback=self.quiet_callback ) + # pylint: disable=W0613 @staticmethod def quiet_callback(ctx, params, value): # --quiet参数仅允许被定义在首位, 其ctx上下文属性必须为空 diff --git a/mindpet/task/options/timeout_option.py b/mindpet/task/options/timeout_option.py old mode 100644 new mode 100755 index 7a27d02..0235ef1 --- a/mindpet/task/options/timeout_option.py +++ b/mindpet/task/options/timeout_option.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""timeout option""" import re import click @@ -9,6 +10,7 @@ class TimeoutOption(click.core.Option): + """TimeoutOption class""" def __init__(self): super().__init__( param_decls=('-t', '--timeout'), @@ -20,6 +22,7 @@ def __init__(self): @staticmethod def get_timeout_hours(match_result): + """get_timeout_hours""" res = 0 extract_days_str = match_result.group(1) @@ -41,6 +44,7 @@ def get_timeout_hours(match_result): return res + # pylint: disable=W0613 def timeout_callback(self, ctx, param, value): """ timeout参数click回调方法 diff --git a/mindpet/utils/__init__.py b/mindpet/utils/__init__.py old mode 100644 new mode 100755 index dfcf6a1..33f0bbe --- a/mindpet/utils/__init__.py +++ b/mindpet/utils/__init__.py @@ -1,3 +1,3 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. \ No newline at end of file +# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
diff --git a/mindpet/utils/constants.py b/mindpet/utils/constants.py old mode 100644 new mode 100755 index 8d0f90d..e2470fb --- a/mindpet/utils/constants.py +++ b/mindpet/utils/constants.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +"""Utils Constants""" import os import stat @@ -77,8 +77,8 @@ # 命令行模式异常退出状态码 ABNORMAL_EXIT_CODE = 1 -# tk定义入参 -TK_DEFINED_PARAM_NAME = ( +# MindPet定义入参 +MINDPET_DEFINED_PARAM_NAME = ( 'dp', 'data_path', 'op', 'output_path', 'bp', 'boot_file_path', 'mc', 'model_config_path', 'pm', 'pretrained_model_path', 'cp', 'ckpt_path', 'q', 'quiet', 't', 'timeout', 'advanced_config') @@ -101,8 +101,7 @@ EVAL_INFER_TASK_NAMES = [EVALUATE_TASK_NAME, INFER_TASK_NAME] # 微调工具包SDK接口清单 -TK_SDK_INTERFACE_NAMES = [FINETUNE_TASK_NAME, EVALUATE_TASK_NAME, INFER_TASK_NAME] +MINDPET_SDK_INTERFACE_NAMES = [FINETUNE_TASK_NAME, EVALUATE_TASK_NAME, INFER_TASK_NAME] # 微调算法清单 DELTA_LIST = ['lora', 'prefixtuning', 'adapter', 'low_rank_adapter', 'bitfit'] - diff --git a/mindpet/utils/entrance_monitor.py b/mindpet/utils/entrance_monitor.py old mode 100644 new mode 100755 index 9c3eea7..925e18c --- a/mindpet/utils/entrance_monitor.py +++ b/mindpet/utils/entrance_monitor.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +"""Entrance Monitor""" class EntranceMonitor: + """Entrance Monitor""" _instance = None - def __new__(cls, *args, **kwargs): + def __new__(cls): if cls._instance is None: cls._instance = object.__new__(cls) return cls._instance diff --git a/mindpet/utils/exceptions.py b/mindpet/utils/exceptions.py old mode 100644 new mode 100755 index e06bb80..a4036d2 --- a/mindpet/utils/exceptions.py +++ b/mindpet/utils/exceptions.py @@ -1,14 +1,14 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +"""MindPet Exceptions""" from mindpet.utils.constants import EMPTY_STRING -# mxTuningKit包基础异常 -class TKError(Exception): +# MindPet包基础异常 +class MindPetError(Exception): def __init__(self, error_info=None): - super(TKError, self).__init__() + super().__init__() self.error_info = error_info def __str__(self): @@ -16,119 +16,142 @@ def __str__(self): # 基础操作异常 -class UnexpectedError(TKError): +class UnexpectedError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(UnexpectedError, self).__init__(error_info) + super().__init__(error_info) -class MakeDirError(TKError): +class MakeDirError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(MakeDirError, self).__init__(error_info) + super().__init__(error_info) -class ReadYamlFileError(TKError): +class ReadYamlFileError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(ReadYamlFileError, self).__init__(error_info) + super().__init__(error_info) -class ManualCancelError(TKError): +class ManualCancelError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(ManualCancelError, self).__init__(error_info) + super().__init__(error_info) # 任务异常 -class CreateProcessError(TKError): +class CreateProcessError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(CreateProcessError, self).__init__(error_info) + super().__init__(error_info) -class MonitorProcessRspError(TKError): +class MonitorProcessRspError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(MonitorProcessRspError, self).__init__(error_info) + super().__init__(error_info) -class TaskError(TKError): +class TaskError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(TaskError, self).__init__(error_info) + super().__init__(error_info) -class UnsupportedPlatformError(TKError): +class UnsupportedPlatformError(MindPetError): + # 
pylint: disable=W0235 def __init__(self, error_info=None): - super(UnsupportedPlatformError, self).__init__(error_info) + super().__init__(error_info) # 接口参数校验异常 -class LinkPathError(TKError): +class LinkPathError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(LinkPathError, self).__init__(error_info) + super().__init__(error_info) -class LowDiskFreeSizeRiskError(TKError): +class LowDiskFreeSizeRiskError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(LowDiskFreeSizeRiskError, self).__init__(error_info) + super().__init__(error_info) -class FileOversizeError(TKError): +class FileOversizeError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(FileOversizeError, self).__init__(error_info) + super().__init__(error_info) -class PathContentError(TKError): +class PathContentError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathContentError, self).__init__(error_info) + super().__init__(error_info) -class PathLengthError(TKError): +class PathLengthError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathLengthError, self).__init__(error_info) + super().__init__(error_info) -class FileNameLengthError(TKError): +class FileNameLengthError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(FileNameLengthError, self).__init__(error_info) + super().__init__(error_info) -class AbsolutePathError(TKError): +class AbsolutePathError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(AbsolutePathError, self).__init__(error_info) + super().__init__(error_info) -class PathGranularityError(TKError): +class PathGranularityError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathGranularityError, self).__init__(error_info) + super().__init__(error_info) -class PathOwnerError(TKError): +class 
PathOwnerError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathOwnerError, self).__init__(error_info) + super().__init__(error_info) -class PathModeError(TKError): +class PathModeError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathModeError, self).__init__(error_info) + super().__init__(error_info) -class PathRightEscalationError(TKError): +class PathRightEscalationError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathRightEscalationError, self).__init__(error_info) + super().__init__(error_info) -class PathLoopError(TKError): +class PathLoopError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(PathLoopError, self).__init__(error_info) + super().__init__(error_info) # model_config配置文件校验异常 -class ModelConfigKeysInfoError(TKError): +class ModelConfigKeysInfoError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(ModelConfigKeysInfoError, self).__init__(error_info) + super().__init__(error_info) -class ModelConfigParamsInfoError(TKError): +class ModelConfigParamsInfoError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(ModelConfigParamsInfoError, self).__init__(error_info) + super().__init__(error_info) -class ModelConfigFreezeInfoError(TKError): +class ModelConfigFreezeInfoError(MindPetError): + # pylint: disable=W0235 def __init__(self, error_info=None): - super(ModelConfigFreezeInfoError, self).__init__(error_info) + super().__init__(error_info) diff --git a/mindpet/utils/io_utils.py b/mindpet/utils/io_utils.py old mode 100644 new mode 100755 index 4b38422..e93e354 --- a/mindpet/utils/io_utils.py +++ b/mindpet/utils/io_utils.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +"""IO APIs""" import os import json import yaml diff --git a/mindpet/utils/task_utils.py b/mindpet/utils/task_utils.py old mode 100644 new mode 100755 index fa9df50..76c1a6e --- a/mindpet/utils/task_utils.py +++ b/mindpet/utils/task_utils.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +"""Task APIs""" import os import signal import time @@ -78,12 +78,12 @@ def create_output_path_subdir_with_uuid(output_path): :param output_path: 入参output_path值 """ uuid_name = str(uuid.uuid4()).replace('-', '') - random_dir = f'tk_{uuid_name}'.upper() + random_dir = f'mindpet_{uuid_name}'.upper() full_output_path = os.path.join(output_path, random_dir).replace('\\', '/') while os.path.exists(full_output_path): uuid_name = str(uuid.uuid4()).replace('-', '') - random_dir = f'tk_{uuid_name}'.upper() + random_dir = f'mindpet_{uuid_name}'.upper() full_output_path = os.path.join(output_path, random_dir).replace('\\', '/') try: @@ -91,6 +91,7 @@ def create_output_path_subdir_with_uuid(output_path): except Exception as ex: raise MakeDirError(f'Failed to create directory from param [output_path], error message: {str(ex)}') from ex + # pylint: disable=W1203 logger.info(f'Create output directory successfully, directory name: {random_dir}') return full_output_path @@ -107,8 +108,8 @@ def model_config_keys_check_item(content, config_keys): try: for key_item in content.keys(): if key_item not in config_keys: - raise ModelConfigKeysInfoError('Invalid config in model config file, ' - 'only support [{}]'.format('/'.join(config_keys))) + raise ModelConfigKeysInfoError("Invalid config in model config file, " + f"only support [{'/'.join(config_keys)}]") except AttributeError as ex: raise ModelConfigKeysInfoError('Invalid key in model config file.') from ex @@ -170,12 +171,16 @@ def handle_exception_log(exception): if isinstance(exception, click.exceptions.Abort): logger.info('Current command is 
artificially canceled.') elif isinstance(exception, click.exceptions.NoSuchOption): + # pylint: disable=W1203 logger.error(f'Invalid param detected, error message: {str(exception)}') elif isinstance(exception, click.exceptions.MissingParameter): + # pylint: disable=W1203 logger.error(f'Necessary param is missing, error message: {str(exception)}') elif isinstance(exception, Exception): if exception is None or not str(exception): logger.error('Exception occurred, no error message available.') elif str(exception).isdigit(): + # pylint: disable=W1203 logger.error(f'Exception occurred, error code: {str(exception)}') + # pylint: disable=W1203 logger.error(f'Exception occurred, error message: {str(exception)}') diff --git a/mindpet/utils/version_control.py b/mindpet/utils/version_control.py old mode 100644 new mode 100755 index 5408248..48aabae --- a/mindpet/utils/version_control.py +++ b/mindpet/utils/version_control.py @@ -1,3 +1,7 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. +"""Version Control APIs""" import mindspore as ms from mindspore import nn from .version_utils import is_version_ge @@ -7,4 +11,4 @@ def get_dropout(dropout_prob): dropout = nn.Dropout(p=dropout_prob) else: dropout = nn.Dropout(keep_prob=1 - dropout_prob) - return dropout \ No newline at end of file + return dropout diff --git a/mindpet/utils/version_utils.py b/mindpet/utils/version_utils.py old mode 100644 new mode 100755 index 2a26703..6d14484 --- a/mindpet/utils/version_utils.py +++ b/mindpet/utils/version_utils.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +"""Version APIs""" def is_version_le(current_version, base_version): """ description: Check whether the current version is lower than or equal to the base version. 
@@ -45,4 +45,4 @@ def is_version_ge(current_version, base_version): continue if int(x) != int(y): return int(x) >= int(y) - return True \ No newline at end of file + return True diff --git a/pylintrc b/pylintrc old mode 100644 new mode 100755 index a139913..27233f2 --- a/pylintrc +++ b/pylintrc @@ -38,7 +38,7 @@ enable=indexing-exception,old-raise-syntax # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager +disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,W0703 # Set the cache size for astng objects. diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt old mode 100644 new mode 100755 index 9e3ae66..d14f0ac --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,9 @@ pyyaml >= 5.3 tqdm pylint -pytest \ No newline at end of file +pytest +click == 8.1.3 +mindspore == 2.0.0 +Pillow == 9.5.0 +mindformers +requests \ No newline at end of file diff --git a/set_up.py b/set_up.py old mode 100644 new mode 100755 index 6538322..cfb1d75 --- a/set_up.py +++ b/set_up.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
+"""Set up""" from distutils.core import setup from setuptools import find_packages @@ -50,7 +51,7 @@ def do_setup(packages_data): package_data=packages_data, entry_points={ 'console_scripts': [ - 'tk = tk.tk_main:cli_wrapper' + 'mindpet = mindpet.mindpet_main:cli_wrapper' ], }, cmdclass=cmd_class @@ -59,7 +60,7 @@ def do_setup(packages_data): if __name__ == '__main__': package_data = { - 'tk': [ + 'mindpet': [ '*.py', '*/*.py', '*/*/*.py', diff --git a/test/developer_test/exit_code/launcher_demo.py b/test/developer_test/exit_code/launcher_demo.py old mode 100644 new mode 100755 index 991756c..fed7c62 --- a/test/developer_test/exit_code/launcher_demo.py +++ b/test/developer_test/exit_code/launcher_demo.py @@ -37,7 +37,7 @@ def finetune_launcher(user_name): change_path_mode(model_config_path, MODE_750) change_path_mode(boot_file_path, MODE_750) - cmd = ['tk', + cmd = ['mindpet', 'finetune', '--quiet', '--data_path', dataset_path, @@ -75,7 +75,7 @@ def evaluate_launcher(user_name): change_path_mode(model_config_path, MODE_750) change_path_mode(boot_file_path, MODE_750) - cmd = ['tk', + cmd = ['mindpet', 'evaluate', '--quiet', '--data_path', dataset_path, @@ -113,7 +113,7 @@ def infer_launcher(user_name): change_path_mode(model_config_path, MODE_750) change_path_mode(boot_file_path, MODE_750) - cmd = ['tk', + cmd = ['mindpet', 'infer', '--quiet', '--data_path', dataset_path, diff --git a/test/developer_test/task/eval_launcher.py b/test/developer_test/task/eval_launcher.py old mode 100644 new mode 100755 index 4032ee4..ffad937 --- a/test/developer_test/task/eval_launcher.py +++ b/test/developer_test/task/eval_launcher.py @@ -20,7 +20,7 @@ chmod 750 dataset outputs ckpt eval_launcher.py model_config_eval.yaml -tk evaluate --data_path $TEST_PATH/dataset/ --ckpt_path $TEST_PATH/ckpt/ --output_path $TEST_PATH/outputs/ \ +mindpet evaluate --data_path $TEST_PATH/dataset/ --ckpt_path $TEST_PATH/ckpt/ --output_path $TEST_PATH/outputs/ \ --model_config_path 
$TEST_PATH/model_config_eval.yaml --boot_file_path $TEST_PATH/eval_launcher.py """ diff --git a/test/developer_test/task/finetune_launcher.py b/test/developer_test/task/finetune_launcher.py old mode 100644 new mode 100755 index 35498eb..934e6f5 --- a/test/developer_test/task/finetune_launcher.py +++ b/test/developer_test/task/finetune_launcher.py @@ -19,7 +19,7 @@ chmod 750 dataset outputs pretrained_models finetune_launcher.py model_config_finetune.yaml -tk finetune --data_path $TEST_PATH/dataset/ --output_path $TEST_PATH/outputs/ \ +mindpet finetune --data_path $TEST_PATH/dataset/ --output_path $TEST_PATH/outputs/ \ --pretrained_model_path $TEST_PATH/pretrained_models/ \ --model_config_path $TEST_PATH/model_config_finetune.yaml --boot_file_path $TEST_PATH/finetune_launcher.py """ diff --git a/test/developer_test/task/infer_launcher.py b/test/developer_test/task/infer_launcher.py old mode 100644 new mode 100755 index 4a68dd0..d263481 --- a/test/developer_test/task/infer_launcher.py +++ b/test/developer_test/task/infer_launcher.py @@ -20,7 +20,7 @@ chmod 750 dataset outputs ckpt infer_launcher.py model_config_infer.yaml -tk infer --data_path $TEST_PATH/dataset/ --ckpt_path $TEST_PATH/ckpt/ --output_path $TEST_PATH/outputs/ \ +mindpet infer --data_path $TEST_PATH/dataset/ --ckpt_path $TEST_PATH/ckpt/ --output_path $TEST_PATH/outputs/ \ --model_config_path $TEST_PATH/model_config_infer.yaml --boot_file_path $TEST_PATH/infer_launcher.py """ diff --git a/test/developer_test/task/model_config_eval.yaml b/test/developer_test/task/model_config_eval.yaml old mode 100644 new mode 100755 diff --git a/test/developer_test/task/model_config_finetune.yaml b/test/developer_test/task/model_config_finetune.yaml old mode 100644 new mode 100755 diff --git a/test/developer_test/task/model_config_infer.yaml b/test/developer_test/task/model_config_infer.yaml old mode 100644 new mode 100755 diff --git a/test/unit_test/delta/test_adapter.py b/test/unit_test/delta/test_adapter.py old mode 
100644 new mode 100755 index b339d69..701c378 --- a/test/unit_test/delta/test_adapter.py +++ b/test/unit_test/delta/test_adapter.py @@ -2,7 +2,10 @@ # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. -import os +import sys +sys.path.append('.') + +import os import logging import unittest import argparse @@ -363,7 +366,7 @@ def test_shard_with_leakyrelu_activation(self): adapter_layer = AdapterLayer(hidden_size=32, bottleneck_size=8, non_linearity='leakyrelu') leakyrelu_strategy_non_linearity = ((1, 1), (1, 1)) adapter_layer.shard(strategy_non_linearity=leakyrelu_strategy_non_linearity) - self.assertEqual(adapter_layer.tk_delta_adapter_block.tk_delta_adapter_non_linear.select_op.in_strategy, + self.assertEqual(adapter_layer.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.select_op.in_strategy, leakyrelu_strategy_non_linearity) logging.info('Finish test_shard_with_LeakyReLU_activation') @@ -372,7 +375,7 @@ def test_shard_with_logsigmoid_activation(self): adapter_layer = AdapterLayer(hidden_size=32, bottleneck_size=8, non_linearity='logsigmoid') logsigmoid_strategy_non_linearity = ((1, 1), (1, 1)) adapter_layer.shard(strategy_non_linearity=logsigmoid_strategy_non_linearity) - self.assertEqual(adapter_layer.tk_delta_adapter_block.tk_delta_adapter_non_linear.log.in_strategy, + self.assertEqual(adapter_layer.mindpet_delta_adapter_block.mindpet_delta_adapter_non_linear.log.in_strategy, logsigmoid_strategy_non_linearity) logging.info('Finish test_shard_with_logsigmoid_activation') diff --git a/test/unit_test/delta/test_lora.py b/test/unit_test/delta/test_lora.py old mode 100644 new mode 100755 index a2610c8..348a9b3 --- a/test/unit_test/delta/test_lora.py +++ b/test/unit_test/delta/test_lora.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. 
- +import sys +sys.path.append('.') import os import logging import unittest @@ -180,24 +181,24 @@ def test_params_with_legal_lora_rank(self): self.assertEqual(2, lora.lora_rank) logging.info("Finish test_params_with_legal_lora_rank") - def test_params_with_legal_tk_delta_lora_a(self): - logging.info('Start test_params_with_legal_tk_delta_lora_a') + def test_params_with_legal_mindpet_delta_lora_a(self): + logging.info('Start test_params_with_legal_mindpet_delta_lora_a') lora = LoRADense(in_channels=1, out_channels=1, lora_rank=2, lora_alpha=4, lora_dropout=0.9, lora_a_init=Tensor(shape=(2, 1), dtype=mstype.int8, init=One())) - target = Tensor([[1], [1]]).asnumpy() == lora.tk_delta_lora_a.asnumpy() + target = Tensor([[1], [1]]).asnumpy() == lora.mindpet_delta_lora_a.asnumpy() for result in target: self.assertTrue(result) - logging.info("Finish test_params_with_legal_tk_delta_lora_a") + logging.info("Finish test_params_with_legal_mindpet_delta_lora_a") - def test_params_with_legal_tk_delta_lora_b(self): - logging.info('Start test_params_with_legal_tk_delta_lora_b') + def test_params_with_legal_mindpet_delta_lora_b(self): + logging.info('Start test_params_with_legal_mindpet_delta_lora_b') lora = LoRADense(in_channels=3, out_channels=2, lora_rank=3, lora_alpha=4, lora_dropout=0.9, lora_b_init=Tensor(shape=(2, 3), dtype=mstype.int8, init=One())) - target = Tensor([[1, 1, 1], [1, 1, 1]]).asnumpy() == lora.tk_delta_lora_b.asnumpy() + target = Tensor([[1, 1, 1], [1, 1, 1]]).asnumpy() == lora.mindpet_delta_lora_b.asnumpy() for _ in target: for result in _: self.assertTrue(result) - logging.info("Finish test_params_with_legal_tk_delta_lora_b") + logging.info("Finish test_params_with_legal_mindpet_delta_lora_b") # construct def test_construct_with_has_bias(self): diff --git a/test/unit_test/delta/test_low_rank_adapter.py b/test/unit_test/delta/test_low_rank_adapter.py old mode 100644 new mode 100755 index f11bd45..9fec36d --- a/test/unit_test/delta/test_low_rank_adapter.py 
+++ b/test/unit_test/delta/test_low_rank_adapter.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. - +import sys +sys.path.append('.') import os import logging import unittest diff --git a/test/unit_test/delta/test_prefix_layer.py b/test/unit_test/delta/test_prefix_layer.py old mode 100644 new mode 100755 index 279e841..b48c212 --- a/test/unit_test/delta/test_prefix_layer.py +++ b/test/unit_test/delta/test_prefix_layer.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. - +import sys +sys.path.append('.') import logging import os import shutil diff --git a/test/unit_test/delta/test_rdrop.py b/test/unit_test/delta/test_rdrop.py old mode 100644 new mode 100755 index d59990c..17ebd20 --- a/test/unit_test/delta/test_rdrop.py +++ b/test/unit_test/delta/test_rdrop.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import logging import os import unittest diff --git a/test/unit_test/entry/resource/boot_file/boot_evaluate.py b/test/unit_test/entry/resource/boot_file/boot_evaluate.py deleted file mode 100644 index be446d9..0000000 --- a/test/unit_test/entry/resource/boot_file/boot_evaluate.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import stat -import json -import logging -import argparse -import os.path - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--learning_rate', type=str) - parser.add_argument('--batch_size', type=str) - parser.add_argument('--advanced_config', type=str) - args = parser.parse_args() - - logging.info(args) - - result_content = 'evaluate task success.' - result_file_path = os.path.join(args.output_path, 'eval_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(result_file_path, flag, mode), 'w') as file: - json.dump(result_content, file) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/entry/resource/boot_file/boot_finetune.py b/test/unit_test/entry/resource/boot_file/boot_finetune.py deleted file mode 100644 index deb0bb2..0000000 --- a/test/unit_test/entry/resource/boot_file/boot_finetune.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import logging -import argparse - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--pretrained_model_path', type=str) - parser.add_argument('--learning_rate', type=str) - parser.add_argument('--batch_size', type=str) - parser.add_argument('--advanced_config', type=str) - args = parser.parse_args() - - logging.info(args) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/entry/resource/boot_file/boot_infer.py b/test/unit_test/entry/resource/boot_file/boot_infer.py deleted file mode 100644 index 84b78f6..0000000 --- a/test/unit_test/entry/resource/boot_file/boot_infer.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - -import stat -import json -import logging -import argparse -import os.path - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--learning_rate', type=str) - parser.add_argument('--batch_size', type=str) - parser.add_argument('--advanced_config', type=str) - args = parser.parse_args() - - logging.info(args) - - result_content = 'infer task success.' 
- result_file_path = os.path.join(args.output_path, 'infer_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(result_file_path, flag, mode), 'w') as file: - json.dump(result_content, file) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/entry/resource/model_config/model_config_evaluate_infer.yaml b/test/unit_test/entry/resource/model_config/model_config_evaluate_infer.yaml deleted file mode 100644 index 892f6d4..0000000 --- a/test/unit_test/entry/resource/model_config/model_config_evaluate_infer.yaml +++ /dev/null @@ -1,3 +0,0 @@ -params: - learning_rate: 1e-4 - batch_size: 32 \ No newline at end of file diff --git a/test/unit_test/entry/resource/model_config/model_config_finetune.yaml b/test/unit_test/entry/resource/model_config/model_config_finetune.yaml deleted file mode 100644 index 734100f..0000000 --- a/test/unit_test/entry/resource/model_config/model_config_finetune.yaml +++ /dev/null @@ -1,9 +0,0 @@ -params: - learning_rate: 1e-4 - batch_size: 32 - -freeze: - include: - [ 'network' ] - exclude: - [ 'block2' ] \ No newline at end of file diff --git a/test/unit_test/entry/test_tk_main.py b/test/unit_test/entry/test_tk_main.py deleted file mode 100644 index d2d1d2c..0000000 --- a/test/unit_test/entry/test_tk_main.py +++ /dev/null @@ -1,481 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import logging -import os -import stat -import shutil -import unittest -import subprocess -import pytest - -FLAG = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 -MODE = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - -logging.getLogger().setLevel(logging.INFO) - - -class TestTkMain(unittest.TestCase): - root_path = None - data_path = None - output_path = None - pretrained_model_path = None - ckpt_path = None - finetune_model_config_path = None - evaluate_infer_model_config_path = None - finetune_boot_path = None - evaluate_boot_path = None - infer_boot_path = None - - @staticmethod - def _create_subprocess(cmd): - """ - 使用cmd创建subprocess子进程 - :param cmd: subprocess命令 - :return: subprocess进程对象 - """ - return subprocess.Popen(cmd, env=os.environ, shell=False, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) - - @classmethod - def setUpClass(cls): - cls.root_path = os.path.join('/', 'tmp', 'test_tk_main') - if not os.path.exists(cls.root_path): - os.makedirs(cls.root_path) - - cls.data_path = os.path.join(cls.root_path, 'data_path') - if not os.path.exists(cls.data_path): - os.makedirs(cls.data_path) - - cls.output_path = os.path.join(cls.root_path, 'output_path') - if not os.path.exists(cls.output_path): - os.makedirs(cls.output_path) - - cls.pretrained_model_path = os.path.join(cls.root_path, 'pretrained_model_path') - if not os.path.exists(cls.pretrained_model_path): - os.makedirs(cls.pretrained_model_path) - - cls.ckpt_path = os.path.join(cls.root_path, 'ckpt_path') - if not os.path.exists(cls.ckpt_path): - os.makedirs(cls.ckpt_path) - - src_finetune_model_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/model_config/model_config_finetune.yaml') - - src_evaluate_infer_model_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/model_config/model_config_evaluate_infer.yaml') - - src_finetune_boot_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file/boot_finetune.py') - - 
src_evaluate_boot_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file/boot_evaluate.py') - - src_infer_boot_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file/boot_infer.py') - - cls.finetune_model_config_path = os.path.join(cls.root_path, 'model_config_finetune.yaml') - cls.evaluate_infer_model_config_path = os.path.join(cls.root_path, 'model_config_evaluate_infer.yaml') - cls.finetune_boot_path = os.path.join(cls.root_path, 'boot_finetune.py') - cls.evaluate_boot_path = os.path.join(cls.root_path, 'boot_evaluate.py') - cls.infer_boot_path = os.path.join(cls.root_path, 'boot_infer.py') - - shutil.copyfile(src_finetune_model_config_path, cls.finetune_model_config_path) - shutil.copyfile(src_evaluate_infer_model_config_path, cls.evaluate_infer_model_config_path) - shutil.copyfile(src_finetune_boot_path, cls.finetune_boot_path) - shutil.copyfile(src_evaluate_boot_path, cls.evaluate_boot_path) - shutil.copyfile(src_infer_boot_path, cls.infer_boot_path) - - cls._chmod(cls.data_path) - cls._chmod(cls.output_path) - cls._chmod(cls.pretrained_model_path) - cls._chmod(cls.ckpt_path) - cls._chmod(cls.finetune_model_config_path) - cls._chmod(cls.evaluate_infer_model_config_path) - cls._chmod(cls.finetune_boot_path) - cls._chmod(cls.evaluate_boot_path) - cls._chmod(cls.infer_boot_path) - - @classmethod - def tearDownClass(cls): - if os.path.exists(cls.data_path): - shutil.rmtree(cls.data_path) - - if os.path.exists(cls.output_path): - shutil.rmtree(cls.output_path) - - if os.path.exists(cls.pretrained_model_path): - shutil.rmtree(cls.pretrained_model_path) - - if os.path.exists(cls.ckpt_path): - shutil.rmtree(cls.ckpt_path) - - if os.path.exists(cls.finetune_model_config_path): - os.remove(cls.finetune_model_config_path) - - if os.path.exists(cls.evaluate_infer_model_config_path): - os.remove(cls.evaluate_infer_model_config_path) - - if os.path.exists(cls.finetune_boot_path): - 
os.remove(cls.finetune_boot_path) - - if os.path.exists(cls.evaluate_boot_path): - os.remove(cls.evaluate_boot_path) - - if os.path.exists(cls.infer_boot_path): - os.remove(cls.infer_boot_path) - - if os.path.exists(cls.root_path): - shutil.rmtree(cls.root_path) - - @classmethod - def _chmod(cls, path): - """ - 修正path的权限 - :param path:待修正路径 - """ - os.chmod(path, 0o750) - - def test_finetune_cli(self): - """ - 测试CLI侧finetune接口的功能 - """ - logging.info('Start test_finetune_cli.') - - cmd = self._create_finetune_command() - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Finetune successfully', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli.') - - def test_evaluate_cli(self): - """ - 测试CLI侧evaluate接口的功能 - """ - logging.info('Start test_evaluate_cli.') - - cmd = self._create_evaluate_infer_command(task_type='evaluate') - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIn('Evaluate successfully', str(std_out, encoding='utf-8')) - - logging.info('Finish test_evaluate_cli.') - - def test_infer_cli(self): - """ - 测试CLI侧infer接口的功能 - """ - logging.info('Start test_infer_cli.') - - cmd = self._create_evaluate_infer_command(task_type='infer') - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIn('Infer successfully', str(std_out, encoding='utf-8')) - - logging.info('Finish test_infer_cli.') - - def test_handle_exception_log_with_click_abort_exception(self): - """ - 测试CLI侧finetune接口抛出click.exception.Abort异常时的功能 - """ - logging.info('Start test_handle_exception_log_with_click_abort_exception.') - - boot_file_path = os.path.join(self.root_path, 'tmp_boot_file.py') - with os.fdopen(os.open(boot_file_path, FLAG, MODE), 'w') as file: - file.write('import click; raise click.exceptions.Abort') - - cmd = self._create_finetune_command() - 
cmd[cmd.index('--boot_file_path') + 1] = boot_file_path - - process = self._create_subprocess(cmd) - std_out, _ = process.communicate(timeout=60) - - self.assertIn('Finetune failed', str(std_out, encoding='utf-8')) - - logging.info('Finish test_handle_exception_log_with_click_abort_exception.') - - def test_handle_exception_log_with_click_no_such_option_exception(self): - """ - 测试CLI侧finetune接口抛出click.exception.NoSuchOption异常时的功能 - """ - logging.info('Start test_handle_exception_log_with_click_no_such_option_exception.') - - boot_file_path = os.path.join(self.root_path, 'tmp_boot_file.py') - with os.fdopen(os.open(boot_file_path, FLAG, MODE), 'w') as file: - file.write('import click; raise click.exceptions.NoSuchOption') - - cmd = self._create_finetune_command() - cmd[cmd.index('--boot_file_path') + 1] = boot_file_path - - process = self._create_subprocess(cmd) - std_out, _ = process.communicate(timeout=60) - - self.assertIn('Finetune failed', str(std_out, encoding='utf-8')) - - logging.info('Finish test_handle_exception_log_with_click_no_such_option_exception.') - - def test_handle_exception_log_with_click_missing_parameter_exception(self): - """ - 测试CLI侧finetune接口抛出click.exception.MissingParameter异常时的功能 - """ - logging.info('Start test_handle_exception_log_with_click_missing_parameter_exception.') - - boot_file_path = os.path.join(self.root_path, 'tmp_boot_file.py') - with os.fdopen(os.open(boot_file_path, FLAG, MODE), 'w') as file: - file.write('import click; raise click.exceptions.MissingParameter') - - cmd = self._create_finetune_command() - cmd[cmd.index('--boot_file_path') + 1] = boot_file_path - - process = self._create_subprocess(cmd) - std_out, _ = process.communicate(timeout=60) - - self.assertIn('Finetune failed', str(std_out, encoding='utf-8')) - - logging.info('Finish test_handle_exception_log_with_click_missing_parameter_exception.') - - def test_handle_exception_log_with_exception_without_err_msg(self): - """ - 
测试CLI侧finetune接口抛出不包含异常信息的Exception时的功能 - """ - logging.info('Start test_handle_exception_log_with_exception_without_err_msg.') - - boot_file_path = os.path.join(self.root_path, 'tmp_boot_file.py') - with os.fdopen(os.open(boot_file_path, FLAG, MODE), 'w') as file: - file.write('raise RuntimeError') - - cmd = self._create_finetune_command() - cmd[cmd.index('--boot_file_path') + 1] = boot_file_path - - process = self._create_subprocess(cmd) - std_out, _ = process.communicate(timeout=60) - - self.assertIn('Finetune failed', str(std_out, encoding='utf-8')) - - logging.info('Finish test_handle_exception_log_with_exception_without_err_msg.') - - def test_handle_exception_log_with_exception_with_err_msg(self): - """ - 测试CLI侧finetune接口抛出包含异常信息的Exception时的功能 - """ - logging.info('Start test_handle_exception_log_with_exception_with_err_msg.') - - boot_file_path = os.path.join(self.root_path, 'tmp_boot_file.py') - with os.fdopen(os.open(boot_file_path, FLAG, MODE), 'w') as file: - file.write('raise RuntimeError(\'runtime error occurred\')') - - cmd = self._create_finetune_command() - cmd[cmd.index('--boot_file_path') + 1] = boot_file_path - - process = self._create_subprocess(cmd) - std_out, _ = process.communicate(timeout=60) - - self.assertIn('Finetune failed', str(std_out, encoding='utf-8')) - - logging.info('Finish test_handle_exception_log_with_exception_with_err_msg.') - - def test_finetune_cli_without_quiet(self): - """ - 测试CLI侧非安静模式的finetune接口的功能 - """ - logging.info('Start test_finetune_cli_without_quiet.') - - cmd = ['tk', - 'finetune', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', '1d1h'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Finetune successfully', str(std_out, 
encoding='utf-8')) - - logging.info('Finish test_finetune_cli_without_quiet.') - - def test_finetune_cli_with_quiet_wrong_position(self): - """ - 测试CLI侧finetune接口中--quiet参数在非首位的情况 - """ - logging.info('Start test_finetune_cli_with_quiet_wrong_position.') - - cmd = ['tk', - 'finetune', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--quiet', - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', '1d1h'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Param [--quiet] should be set first.', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli_with_quiet_wrong_position.') - - def test_finetune_cli_with_invalid_timeout_hour(self): - """ - 测试CLI侧finetune接口传入--timeout参数中包含不合法小时数 - """ - logging.info('Start test_finetune_cli_with_invalid_timeout_hour.') - - cmd = ['tk', - 'finetune', - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', '1d30h'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Invalid param [timeout].', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli_with_invalid_timeout_hour.') - - def test_finetune_cli_with_zero_timeout_hour(self): - """ - 测试CLI侧finetune接口传入--timeout参数值为0的情况 - """ - logging.info('Start test_finetune_cli_with_zero_timeout_hour.') - - cmd = ['tk', - 'finetune', - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - 
'--boot_file_path', self.finetune_boot_path, - '--timeout', '0d0h'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Invalid param [timeout].', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli_with_zero_timeout_hour.') - - def test_finetune_cli_with_timeout_day_limit(self): - """ - 测试CLI侧finetune接口传入--timeout参数值仅包含天数限制的情况 - """ - logging.info('Start test_finetune_cli_with_timeout_day_limit.') - - cmd = ['tk', - 'finetune', - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', '1d'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Finetune successfully.', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli_with_timeout_day_limit.') - - def test_finetune_cli_with_timeout_hour_limit(self): - """ - 测试CLI侧finetune接口传入--timeout参数值仅包含小时数限制的情况 - """ - logging.info('Start test_finetune_cli_with_timeout_hour_limit.') - - cmd = ['tk', - 'finetune', - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', '1h'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Finetune successfully.', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli_with_timeout_hour_limit.') - - def test_finetune_cli_with_invalid_timeout_format(self): - """ - 测试CLI侧finetune接口传入--timeout参数值不符合约束格式的情况 - """ - logging.info('Start 
test_finetune_cli_with_invalid_timeout_format.') - - cmd = ['tk', - 'finetune', - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', 'd1h1'] - - process = self._create_subprocess(cmd) - std_out, std_err = process.communicate(timeout=60) - - self.assertIsNone(std_err) - self.assertIn('Invalid param [timeout].', str(std_out, encoding='utf-8')) - - logging.info('Finish test_finetune_cli_with_invalid_timeout_format.') - - def _create_finetune_command(self): - """ - 组装finetune cmd指令 - :return: cmd指令 - """ - return ['tk', - 'finetune', - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--pretrained_model_path', self.pretrained_model_path, - '--model_config_path', self.finetune_model_config_path, - '--boot_file_path', self.finetune_boot_path, - '--timeout', '1d1h'] - - def _create_evaluate_infer_command(self, task_type): - """ - 组装evaluate/infer cmd指令 - :return: cmd指令 - """ - return ['tk', - task_type, - '--quiet', - '--data_path', self.data_path, - '--output_path', self.output_path, - '--ckpt_path', self.pretrained_model_path, - '--model_config_path', self.evaluate_infer_model_config_path, - '--boot_file_path', self.evaluate_boot_path if task_type == 'evaluate' else self.infer_boot_path, - '--timeout', '1d1h'] - - -if __name__ == '__main__': - pytest.main(['-s', os.path.abspath(__file__)]) diff --git a/test/unit_test/entry/test_tk_sdk.py b/test/unit_test/entry/test_tk_sdk.py deleted file mode 100644 index 6aefbd4..0000000 --- a/test/unit_test/entry/test_tk_sdk.py +++ /dev/null @@ -1,364 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import logging -import os -import shutil -import unittest -import pytest -import mindpet.tk_sdk as sdk -from mindpet.utils.constants import DEFAULT_FLAGS, DEFAULT_MODES - - -class TestTkSdk(unittest.TestCase): - root_path = None - data_path = None - output_path = None - pretrained_model_path = None - ckpt_path = None - finetune_model_config_path = None - evaluate_infer_model_config_path = None - finetune_boot_path = None - evaluate_boot_path = None - infer_boot_path = None - - @classmethod - def setUpClass(cls): - cls.root_path = os.path.join('/', 'tmp', 'test_tk_main') - if not os.path.exists(cls.root_path): - os.makedirs(cls.root_path) - - cls.data_path = os.path.join(cls.root_path, 'data_path') - if not os.path.exists(cls.data_path): - os.makedirs(cls.data_path) - - cls.output_path = os.path.join(cls.root_path, 'output_path') - if not os.path.exists(cls.output_path): - os.makedirs(cls.output_path) - - cls.pretrained_model_path = os.path.join(cls.root_path, 'pretrained_model_path') - if not os.path.exists(cls.pretrained_model_path): - os.makedirs(cls.pretrained_model_path) - - cls.ckpt_path = os.path.join(cls.root_path, 'ckpt_path') - if not os.path.exists(cls.ckpt_path): - os.makedirs(cls.ckpt_path) - - src_finetune_model_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/model_config/model_config_finetune.yaml') - - src_evaluate_infer_model_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/model_config/model_config_evaluate_infer.yaml') - - src_finetune_boot_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file/boot_finetune.py') - - src_evaluate_boot_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file/boot_evaluate.py') - - src_infer_boot_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file/boot_infer.py') - - cls.finetune_model_config_path = os.path.join(cls.root_path, 
'model_config_finetune.yaml') - cls.evaluate_infer_model_config_path = os.path.join(cls.root_path, 'model_config_evaluate_infer.yaml') - cls.finetune_boot_path = os.path.join(cls.root_path, 'boot_finetune.py') - cls.evaluate_boot_path = os.path.join(cls.root_path, 'boot_evaluate.py') - cls.infer_boot_path = os.path.join(cls.root_path, 'boot_infer.py') - - shutil.copyfile(src_finetune_model_config_path, cls.finetune_model_config_path) - shutil.copyfile(src_evaluate_infer_model_config_path, cls.evaluate_infer_model_config_path) - shutil.copyfile(src_finetune_boot_path, cls.finetune_boot_path) - shutil.copyfile(src_evaluate_boot_path, cls.evaluate_boot_path) - shutil.copyfile(src_infer_boot_path, cls.infer_boot_path) - - cls._chmod(cls.data_path) - cls._chmod(cls.output_path) - cls._chmod(cls.pretrained_model_path) - cls._chmod(cls.ckpt_path) - cls._chmod(cls.finetune_model_config_path) - cls._chmod(cls.evaluate_infer_model_config_path) - cls._chmod(cls.finetune_boot_path) - cls._chmod(cls.evaluate_boot_path) - cls._chmod(cls.infer_boot_path) - - @classmethod - def tearDownClass(cls): - if os.path.exists(cls.data_path): - shutil.rmtree(cls.data_path) - - if os.path.exists(cls.output_path): - shutil.rmtree(cls.output_path) - - if os.path.exists(cls.pretrained_model_path): - shutil.rmtree(cls.pretrained_model_path) - - if os.path.exists(cls.ckpt_path): - shutil.rmtree(cls.ckpt_path) - - if os.path.exists(cls.finetune_model_config_path): - os.remove(cls.finetune_model_config_path) - - if os.path.exists(cls.evaluate_infer_model_config_path): - os.remove(cls.evaluate_infer_model_config_path) - - if os.path.exists(cls.finetune_boot_path): - os.remove(cls.finetune_boot_path) - - if os.path.exists(cls.evaluate_boot_path): - os.remove(cls.evaluate_boot_path) - - if os.path.exists(cls.infer_boot_path): - os.remove(cls.infer_boot_path) - - if os.path.exists(cls.root_path): - shutil.rmtree(cls.root_path) - - @classmethod - def _chmod(cls, path): - """ - 修正path的权限 - :param 
path:待修正路径 - """ - os.chmod(path, 0o750) - - def test_finetune_sdk(self): - """ - 测试SDK侧finetune接口 - """ - logging.info('Start test_finetune_sdk.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=self.finetune_boot_path) - - self.assertTrue(result) - - logging.info('Finish test_finetune_sdk.') - - def test_evaluate_sdk(self): - """ - 测试SDK侧evaluate接口 - """ - logging.info('Start test_evaluate_sdk.') - - result = sdk.evaluate(data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.evaluate_infer_model_config_path, - boot_file_path=self.evaluate_boot_path) - - self.assertEqual(result.get('status'), 0) - self.assertEqual(result.get('task_result'), 'evaluate task success.') - - logging.info('Finish test_evaluate_sdk.') - - def test_infer_sdk(self): - """ - 测试SDK侧infer接口 - """ - logging.info('Start test_infer_sdk.') - - result = sdk.infer(data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.evaluate_infer_model_config_path, - boot_file_path=self.infer_boot_path) - - self.assertEqual(result.get('status'), 0) - self.assertEqual(result.get('task_result'), 'infer task success.') - - logging.info('Finish test_infer_sdk.') - - def test_finetune_sdk_with_default_params(self): - """ - 测试SDK侧finetune接口, 在部分参数以非索引指定下的情况 - """ - logging.info('Start test_finetune_sdk_with_default_params.') - - result = sdk.finetune(self.data_path, - self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=self.finetune_boot_path) - - self.assertTrue(result) - - logging.info('Finish test_finetune_sdk_with_default_params.') - - def test_finetune_sdk_with_param_quiet(self): - """ - 测试SDK侧finetune接口, 在传入参数quiet时是否正确触发异常 - """ - logging.info('Start 
test_finetune_sdk_with_param_quiet.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=self.finetune_boot_path, - quiet=True) - - self.assertFalse(result) - - logging.info('Finish test_finetune_sdk_with_param_quiet.') - - def test_start_by_task_type_with_invalid_task_type(self): - """ - 测试SDK侧start_by_task_type传入不合理task_type时是否及时触发异常 - """ - logging.info('Start test_start_by_task_type_with_invalid_task_type.') - - args = None - kwargs = {'data_path': self.data_path} - - result = sdk.start_by_task_type(args, kwargs, task_type='others', ret_err_msg=False) - - self.assertFalse(result) - - logging.info('Finish test_start_by_task_type_with_invalid_task_type.') - - def test_finetune_with_none_boot_file_path(self): - """ - 测试SDK侧finetune功能不传boot_file_path情况下的处理 - """ - logging.info('Start test_finetune_with_none_boot_file_path.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=None) - - self.assertFalse(result) - - logging.info('Finish test_finetune_with_none_boot_file_path.') - - def test_finetune_with_boot_file_path_invalid_file_type(self): - """ - 测试SDK侧finetune功能传非.py结尾的boot_file_path情况 - """ - logging.info('Start test_finetune_with_boot_file_path_invalid_file_type.') - - temp_boot_file_path = os.path.join(self.root_path, 'temp_boot_file.ppp') - with os.fdopen(os.open(temp_boot_file_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - file.write('temp boot file with invalid file type.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=temp_boot_file_path) - - self.assertFalse(result) - - if 
os.path.exists(temp_boot_file_path): - os.remove(temp_boot_file_path) - - logging.info('Finish test_finetune_with_boot_file_path_invalid_file_type.') - - def test_evaluate_with_none_ckpt_path(self): - """ - 测试SDK侧evaluate功能不传ckpt_path的情况 - """ - logging.info('Start test_evaluate_with_none_ckpt_path.') - - result = sdk.evaluate(data_path=self.data_path, - output_path=self.output_path, - ckpt_path=None, - model_config_path=self.evaluate_infer_model_config_path, - boot_file_path=self.evaluate_boot_path) - - self.assertFalse(result) - - logging.info('Finish test_evaluate_with_none_ckpt_path.') - - def test_finetune_with_none_data_path(self): - """ - 测试SDK侧finetune功能不传data_path参数的情况 - """ - logging.info('Start test_finetune_with_none_data_path.') - - result = sdk.finetune(data_path=None, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=self.finetune_boot_path) - - self.assertFalse(result) - - logging.info('Finish test_finetune_with_none_data_path.') - - def test_finetune_with_none_model_config_path(self): - """ - 测试SDK侧finetune功能不传model_config_path参数的情况 - """ - logging.info('Start test_finetune_with_none_model_config_path.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=None, - boot_file_path=self.finetune_boot_path) - - self.assertTrue(result) - - logging.info('Finish test_finetune_with_none_model_config_path.') - - def test_finetune_with_model_config_path_invalid_file_type(self): - """ - 测试SDK侧finetune功能传非.yaml/.yml结尾的model_config_path的情况 - """ - logging.info('Start test_finetune_with_model_config_path_invalid_file_type.') - - temp_model_config_path = os.path.join(self.root_path, 'temp_model_config.yyy') - with os.fdopen(os.open(temp_model_config_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - file.write('temp model config file with invalid file 
type.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=temp_model_config_path, - boot_file_path=self.finetune_boot_path) - - self.assertFalse(result) - - logging.info('Finish test_finetune_with_model_config_path_invalid_file_type.') - - def test_finetune_with_none_output_path(self): - """ - 测试SDK侧finetune功能不传output参数的情况 - """ - logging.info('Start test_finetune_with_none_output_path.') - - result = sdk.finetune(data_path=self.data_path, - output_path=None, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.finetune_model_config_path, - boot_file_path=self.finetune_boot_path) - - self.assertFalse(result) - - logging.info('Finish test_finetune_with_none_output_path.') - - def test_finetune_with_none_pretrained_model_path(self): - """ - 测试SDK侧finetune功能不传pretrained_model_path参数的情况 - """ - logging.info('Start test_finetune_with_none_pretrained_model_path.') - - result = sdk.finetune(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=None, - model_config_path=self.finetune_model_config_path, - boot_file_path=self.finetune_boot_path) - - self.assertTrue(result) - - logging.info('Finish test_finetune_with_none_pretrained_model_path.') - - -if __name__ == '__main__': - pytest.main(['-s', os.path.abspath(__file__)]) diff --git a/test/unit_test/graph/data/train-images.idx3-ubyte b/test/unit_test/graph/data/train-images.idx3-ubyte old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/data/train-labels.idx1-ubyte b/test/unit_test/graph/data/train-labels.idx1-ubyte old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/output/checkpoint_base-1_3.ckpt b/test/unit_test/graph/output/checkpoint_base-1_3.ckpt old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/output/checkpoint_base-graph.meta b/test/unit_test/graph/output/checkpoint_base-graph.meta old mode 100644 new mode 100755 
diff --git a/test/unit_test/graph/resource/test_freeze_config_file.yaml b/test/unit_test/graph/resource/test_freeze_config_file.yaml old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/resource/test_freeze_config_file_include_not_list.yaml b/test/unit_test/graph/resource/test_freeze_config_file_include_not_list.yaml old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/resource/test_freeze_config_file_no_freeze_key.yaml b/test/unit_test/graph/resource/test_freeze_config_file_no_freeze_key.yaml old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/resource/test_freeze_config_file_no_include_and_exclude.yaml b/test/unit_test/graph/resource/test_freeze_config_file_no_include_and_exclude.yaml old mode 100644 new mode 100755 diff --git a/test/unit_test/graph/test_freeze_utils.py b/test/unit_test/graph/test_freeze_utils.py old mode 100644 new mode 100755 index 4871ec1..d599d54 --- a/test/unit_test/graph/test_freeze_utils.py +++ b/test/unit_test/graph/test_freeze_utils.py @@ -5,6 +5,8 @@ """ 功能: 冻结功能单元测试模块 """ +import sys +sys.path.append('.') import logging import os import unittest @@ -14,6 +16,7 @@ from mindspore import nn from mindpet.graph.freeze_utils import freeze_modules, freeze_delta, freeze_from_config from mindpet.utils.exceptions import ModelConfigFreezeInfoError +from mindformers.modules import Transformer logging.getLogger().setLevel(logging.INFO) @@ -337,10 +340,10 @@ class SimpleNetwork(nn.Cell): def __init__(self): super(SimpleNetwork, self).__init__() self.embedding = nn.Embedding(10, 5, True) - self.transformer = nn.Transformer(batch_size=2, encoder_layers=1, decoder_layers=2, hidden_size=64, + self.transformer = Transformer(batch_size=2, encoder_layers=1, decoder_layers=2, hidden_size=64, ffn_hidden_size=64, src_seq_length=20, tgt_seq_length=10) - self.tk_delta_lora = nn.Dense(10, 5) - self.tk_delta_prefixtuning = nn.Dense(10, 5) + self.mindpet_delta_lora = nn.Dense(10, 5) + self.mindpet_delta_prefixtuning = 
nn.Dense(10, 5) self.dense = nn.Dense(10, 5) self.relu = nn.ReLU() diff --git a/test/unit_test/graph/test_save_ckpt.py b/test/unit_test/graph/test_save_ckpt.py old mode 100644 new mode 100755 index d1b43ae..ec2302b --- a/test/unit_test/graph/test_save_ckpt.py +++ b/test/unit_test/graph/test_save_ckpt.py @@ -5,6 +5,8 @@ """ 功能: 保存可训练参数功能单元测试模块 """ +import sys +sys.path.append('.') import os import shutil import logging @@ -297,7 +299,7 @@ def test_ckpt_num(self): logging.info('Finish test_ckpt_num.') - @mock.patch("tk.graph.ckpt_util.TrainableParamsCheckPoint._check_save_ckpt") + @mock.patch("mindpet.graph.ckpt_util.TrainableParamsCheckPoint._check_save_ckpt") def test_check_save_ckpt(self, mock_func): logging.info('Start test_check_save_ckpt.') ckpt_path = os.path.join(cur_dir, "temp") @@ -396,8 +398,6 @@ def test_enable_ge(self, mock_api, mock_func): train(params_check_point, enable=True) self.assertTrue(os.path.exists(ckpt_path)) - meta_path = os.path.join(ckpt_path, "checkpoint_delta-graph.meta") - self.assertTrue(os.path.exists(meta_path)) shutil.rmtree(ckpt_path, ignore_errors=True) @@ -426,3 +426,6 @@ def step_end(self, run_context): if __name__ == '__main__': pytest.main(['-s', os.path.abspath(__file__)]) + +# Set source attribute for function TestNet.construct to support run so or pyc file in Graph Mode. +setattr(TestNet.construct, 'source', ([' def construct(self, value):\n', ' # 使用定义好的运算构建前向网络\n', ' value = mindspore.ops.reshape(value, (value.shape[0], -1))\n', ' value = self.fc_temp(value)\n', ' return value\n'], 56)) diff --git a/test/unit_test/log/test_concurrent_log.py b/test/unit_test/log/test_concurrent_log.py old mode 100644 new mode 100755 index 04f0ca8..fc83627 --- a/test/unit_test/log/test_concurrent_log.py +++ b/test/unit_test/log/test_concurrent_log.py @@ -2,7 +2,8 @@ # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +import sys +sys.path.append('.') import os import shutil import time diff --git a/test/unit_test/log/test_logger.py b/test/unit_test/log/test_logger.py old mode 100644 new mode 100755 index 63b4bab..601618a --- a/test/unit_test/log/test_logger.py +++ b/test/unit_test/log/test_logger.py @@ -2,7 +2,8 @@ # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import os import shutil import logging @@ -15,7 +16,7 @@ from mindpet.utils.exceptions import MakeDirError, UnsupportedPlatformError, PathOwnerError, PathModeError from mindpet.utils.constants import DEFAULT_MAX_LOG_FILE_NUM, EMPTY_STRING -MODE640 = 0o640 +MODE740 = 0o740 CONFIG_TEST_BASE_DIR = os.path.expanduser('~/.cache/Huawei/mxTuningKit/log') CONFIG_TEST_NODE_DIR = os.path.realpath(os.path.join(CONFIG_TEST_BASE_DIR, 'node_0')) @@ -34,7 +35,7 @@ class TestLogger(TestCase): @classmethod def setUpClass(cls) -> None: - cls.temp_path = '/temp_path' + cls.temp_path = './temp_path' cls.full_path = "test" def test_logger_level(self): @@ -113,30 +114,6 @@ def test_log_content_sapce_check(self): self.assertEqual(replace_args, " ") logging.info('Finish test_log_content_sapce_check.') - def test_wrap_local_working_directory_path_owner(self): - logging.info('Start test_wrap_local_working_directory_path_owner.') - temp_path = self.temp_path - os.environ['HOME'] = temp_path - if not os.path.exists(temp_path): - os.makedirs(temp_path, exist_ok=True) - os.chown(temp_path, uid=100, gid=-1) - with self.assertRaises(PathOwnerError): - wrap_local_working_directory("test") - shutil.rmtree(temp_path) - logging.info('Finish test_wrap_local_working_directory_path_owner.') - - def test_wrap_local_working_directory_path_rights(self): - logging.info('Start test_wrap_local_working_directory_path_rights.') - temp_path = self.temp_path - os.environ['HOME'] = temp_path - if not os.path.exists(temp_path): - os.makedirs(temp_path, exist_ok=True) - 
os.chmod(temp_path, 0o752) - with self.assertRaises(PathModeError): - wrap_local_working_directory("test") - os.rmdir(temp_path) - logging.info('Finish test_wrap_local_working_directory_path_rights.') - def test_wrap_local_working_directory_none(self): logging.info('Start test_wrap_local_working_directory_none.') with self.assertRaises(ValueError): @@ -168,7 +145,7 @@ def test_wrap_local_working_directory_success(self): logging.info('Start test_wrap_local_working_directory_success.') temp_path = self.temp_path os.environ['HOME'] = temp_path - os.makedirs(temp_path, exist_ok=True, mode=MODE640) + os.makedirs(temp_path, exist_ok=True, mode=MODE740) full_path = self.full_path working_directory = wrap_local_working_directory(full_path) self.assertTrue(os.path.exists(working_directory)) @@ -179,9 +156,9 @@ def test_wrap_local_working_directory_with_config(self): logging.info('Start test_wrap_local_working_directory_with_config.') temp_path = self.temp_path os.environ['HOME'] = temp_path - os.makedirs(temp_path, exist_ok=True, mode=MODE640) + os.makedirs(temp_path, exist_ok=True, mode=MODE740) full_path = self.full_path - path_config = {"path": "test/temp", "rule": MODE640} + path_config = {"path": "test/temp", "rule": MODE740} working_directory = wrap_local_working_directory(full_path, specific_path_config=path_config) self.assertTrue(os.path.exists(working_directory)) shutil.rmtree(temp_path) @@ -191,10 +168,10 @@ def test_wrap_local_working_directory_with_config_file_exist(self): logging.info('Start test_wrap_local_working_directory_with_config_file_exist.') temp_path = self.temp_path os.environ['HOME'] = temp_path - os.makedirs(temp_path, exist_ok=True, mode=MODE640) + os.makedirs(temp_path, exist_ok=True, mode=MODE740) full_path = self.full_path - path_config = {"path": "test/temp", "rule": MODE640} - os.makedirs("/temp_path/.cache/Huawei/mxTuningKit/test/temp/test", exist_ok=True) + path_config = {"path": "test/temp", "rule": MODE740} + 
os.makedirs("./temp_path/.cache/Huawei/mxTuningKit/test/temp/test", exist_ok=True) working_directory = wrap_local_working_directory(full_path, specific_path_config=path_config) self.assertTrue(os.path.exists(working_directory)) shutil.rmtree(temp_path) @@ -204,13 +181,13 @@ def test_wrap_local_working_directory_with_config_illegal(self): logging.info('Start test_wrap_local_working_directory_with_config_illegal.') temp_path = self.temp_path os.environ['HOME'] = temp_path - os.makedirs(temp_path, exist_ok=True, mode=MODE640) + os.makedirs(temp_path, exist_ok=True, mode=MODE740) full_path = self.full_path path_config = {"path": "test"} with self.assertRaises(ValueError): working_directory = wrap_local_working_directory(full_path, specific_path_config=path_config) - path_config = {"rule": MODE640} + path_config = {"rule": MODE740} with self.assertRaises(ValueError): working_directory = wrap_local_working_directory(full_path, specific_path_config=path_config) @@ -222,7 +199,7 @@ def test_wrap_local_working_directory_platfrom_not_linux(self, mock_func): logging.info('Start test_wrap_local_working_directory_platfrom_not_linux.') temp_path = self.temp_path os.environ['HOME'] = temp_path - os.makedirs(temp_path, exist_ok=True, mode=MODE640) + os.makedirs(temp_path, exist_ok=True, mode=MODE740) full_path = self.full_path mock_func.return_value = "windows" @@ -236,7 +213,7 @@ def test_wrap_local_working_directory_full_path_fail(self, mock_func): logging.info('Start test_wrap_local_working_directory_full_path_fail.') temp_path = self.temp_path os.environ['HOME'] = temp_path - os.mkdir(temp_path, mode=MODE640) + os.mkdir(temp_path, mode=MODE740) full_path = self.full_path mock_func.side_effect = RuntimeError with self.assertRaises(MakeDirError): @@ -250,12 +227,12 @@ def test_wrap_local_working_directory_full_path_with_config_fail(self, mock_func temp_path = self.temp_path full_path = self.full_path os.environ['HOME'] = temp_path - os.mkdir(temp_path, mode=MODE640) - 
os.mkdir("/temp_path/.cache", mode=MODE640) - os.mkdir("/temp_path/.cache/Huawei", mode=MODE640) - os.mkdir("/temp_path/.cache/Huawei/mxTuningKit", mode=MODE640) - os.makedirs("/temp_path/.cache/Huawei/mxTuningKit", exist_ok=True, mode=MODE640) - path_config = {"path": "test/temp", "rule": MODE640} + os.mkdir(temp_path, mode=MODE740) + os.mkdir("./temp_path/.cache", mode=MODE740) + os.mkdir("./temp_path/.cache/Huawei", mode=MODE740) + os.mkdir("./temp_path/.cache/Huawei/mxTuningKit", mode=MODE740) + os.makedirs("./temp_path/.cache/Huawei/mxTuningKit", exist_ok=True, mode=MODE740) + path_config = {"path": "test/temp", "rule": MODE740} mock_func.side_effect = RuntimeError with self.assertRaises(MakeDirError): working_directory = wrap_local_working_directory(full_path, specific_path_config=path_config) @@ -268,13 +245,13 @@ def test_wrap_local_working_directory_full_path_with_wrap_fail(self, mock_func): temp_path = self.temp_path full_path = self.full_path os.environ['HOME'] = temp_path - os.mkdir(temp_path, mode=MODE640) - os.mkdir("/temp_path/.cache", mode=MODE640) - os.mkdir("/temp_path/.cache/Huawei", mode=MODE640) - os.mkdir("/temp_path/.cache/Huawei/mxTuningKit", mode=MODE640) - os.mkdir("/temp_path/.cache/Huawei/mxTuningKit/test", mode=MODE640) - os.mkdir("/temp_path/.cache/Huawei/mxTuningKit/test/temp", mode=MODE640) - path_config = {"path": "test/temp", "rule": MODE640} + os.mkdir(temp_path, mode=MODE740) + os.mkdir("./temp_path/.cache", mode=MODE740) + os.mkdir("./temp_path/.cache/Huawei", mode=MODE740) + os.mkdir("./temp_path/.cache/Huawei/mxTuningKit", mode=MODE740) + os.mkdir("./temp_path/.cache/Huawei/mxTuningKit/test", mode=MODE740) + os.mkdir("./temp_path/.cache/Huawei/mxTuningKit/test/temp", mode=MODE740) + path_config = {"path": "test/temp", "rule": MODE740} mock_func.side_effect = RuntimeError with self.assertRaises(MakeDirError): working_directory = wrap_local_working_directory(full_path, specific_path_config=path_config) @@ -283,6 +260,7 @@ def 
test_wrap_local_working_directory_full_path_with_wrap_fail(self, mock_func): def test_get_file_path_list_base_dir_none(self): logging.info('Start test_get_file_path_list_base_dir_none.') + const.get_local_default_log_file_dir() base_save_dir = os.path.expanduser(const.local_default_log_file_dir) file_path = os.path.join(base_save_dir, "test.log") path_list = get_file_path_list(base_save_dir=None, append_rank_dir=False, server_id=0, rank_id=0, @@ -290,10 +268,11 @@ def test_get_file_path_list_base_dir_none(self): self.assertIn(file_path, path_list) logging.info('Finish test_get_file_path_list_base_dir_none.') - @mock.patch("tk.log.log.check_link_path") + @mock.patch("mindpet.log.log.check_link_path") def test_get_file_path_list_base_dir_link_path(self, mock_func): logging.info('Start test_get_file_path_list_base_dir_link_path.') mock_func.return_value = True + const.get_local_default_log_file_dir() base_save_dir = os.path.expanduser(const.local_default_log_file_dir) file_path = os.path.join(base_save_dir, "node_0/device_0/test.log") path_list = get_file_path_list(base_save_dir=None, append_rank_dir=True, server_id=0, rank_id=0, diff --git a/test/unit_test/log/test_logger_validator.py b/test/unit_test/log/test_logger_validator.py old mode 100644 new mode 100755 index bb90932..23c723c --- a/test/unit_test/log/test_logger_validator.py +++ b/test/unit_test/log/test_logger_validator.py @@ -2,7 +2,8 @@ # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import os import copy import logging diff --git a/test/unit_test/run_test.sh b/test/unit_test/run_test.sh old mode 100644 new mode 100755 index 79e15ea..005f39b --- a/test/unit_test/run_test.sh +++ b/test/unit_test/run_test.sh @@ -84,7 +84,7 @@ function install_dependence() { function run_dt_test_cases() { info "Start getting testcase final result." 
- pytest -v --junit-xml=./final.xml --cov=tk --cov-report=html --cov-report=xml --disable-pytest-warnings \ + pytest -v --junit-xml=./final.xml --cov=mindpet --cov-report=html --cov-report=xml --disable-pytest-warnings \ --cov-branch --cache-clear mv .coverage coverage.xml final.xml htmlcov result diff --git a/test/unit_test/security/param_check/test_base_check.py b/test/unit_test/security/param_check/test_base_check.py old mode 100644 new mode 100755 index b3e1344..edfd962 --- a/test/unit_test/security/param_check/test_base_check.py +++ b/test/unit_test/security/param_check/test_base_check.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import os import logging import unittest diff --git a/test/unit_test/security/param_check/test_model_config_check_utils.py b/test/unit_test/security/param_check/test_model_config_check_utils.py old mode 100644 new mode 100755 index 70de6b3..f4fa57e --- a/test/unit_test/security/param_check/test_model_config_check_utils.py +++ b/test/unit_test/security/param_check/test_model_config_check_utils.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import logging import shutil import os.path diff --git a/test/unit_test/security/param_check/test_option_check_utils.py b/test/unit_test/security/param_check/test_option_check_utils.py old mode 100644 new mode 100755 index e122964..49d334d --- a/test/unit_test/security/param_check/test_option_check_utils.py +++ b/test/unit_test/security/param_check/test_option_check_utils.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +import sys +sys.path.append('.') import os import shutil import unittest @@ -978,31 +979,6 @@ def test_path_granularity_check_with_file(self): path_including_file=False) logging.info('Finish test_path_granularity_check_with_file.') - def test_path_right_escalation_check_with_different_owner(self): - """ - 测试路径权限提升校验, 在路径属主与实际调用用户不符合的情况 - """ - logging.info('Start test_path_right_escalation_check_with_different_owner.') - - path = os.path.join(LOCAL_PATH, 'path_right_escalation_check') - if not os.path.exists(path): - os.makedirs(path) - - os.chown(path, uid=100, gid=-1) - entrance_monitor.set_value(ENTRANCE_TYPE, 'SDK') - - with self.assertRaises(PathRightEscalationError): - PathRightEscalationCheck(option_name='path_right_escalation_check', - option_value=path, - mode='777', - force_quit=True, - quiet=False) - - if os.path.exists(path): - os.rmdir(path) - - logging.info('Finish test_path_right_escalation_check_with_different_owner.') - def test_path_right_escalation_check_with_none_mode(self): """ 测试路径权限提升校验, 参数mode传None值的情况 @@ -1225,8 +1201,7 @@ def test_file_size_check(self): with self.assertRaises(FileOversizeError): FileSizeCheck(option_name='file_size_check', option_value=full_path, - path_including_file=True, - max_file_size=10) + path_including_file=True) if os.path.exists(full_path): os.remove(full_path) diff --git a/test/unit_test/task/evaluate_infer/.test_result_file_check.py.swp b/test/unit_test/task/evaluate_infer/.test_result_file_check.py.swp deleted file mode 100644 index d90ff6b..0000000 Binary files a/test/unit_test/task/evaluate_infer/.test_result_file_check.py.swp and /dev/null differ diff --git a/test/unit_test/task/evaluate_infer/resource/boot_file_with_empty_json.py b/test/unit_test/task/evaluate_infer/resource/boot_file_with_empty_json.py deleted file mode 100644 index 7d06396..0000000 --- a/test/unit_test/task/evaluate_infer/resource/boot_file_with_empty_json.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 
-*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - -import os -import stat -import json -import argparse - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--lr', type=str) - args = parser.parse_args() - - content = '' - - output_path = args.output_path - result_file_path = os.path.join(output_path, 'eval_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(result_file_path, flag, mode), 'w') as file: - json.dump(content, file) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/task/evaluate_infer/resource/boot_file_with_link_result_json.py b/test/unit_test/task/evaluate_infer/resource/boot_file_with_link_result_json.py deleted file mode 100644 index 3895759..0000000 --- a/test/unit_test/task/evaluate_infer/resource/boot_file_with_link_result_json.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import os -import stat -import json -import argparse - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--lr', type=str) - args = parser.parse_args() - - content = 'evaluate task success' - - output_path = args.output_path - src_result_file_path = os.path.join(output_path, 'src_eval_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(src_result_file_path, flag, mode), 'w') as file: - json.dump(content, file) - - tar_result_file_path = os.path.join(output_path, 'eval_result.json') - - os.symlink(src_result_file_path, tar_result_file_path) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/task/evaluate_infer/resource/boot_file_with_oversize_result_json.py b/test/unit_test/task/evaluate_infer/resource/boot_file_with_oversize_result_json.py deleted file mode 100644 index 77a9254..0000000 --- a/test/unit_test/task/evaluate_infer/resource/boot_file_with_oversize_result_json.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import os -import stat -import json -import argparse - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--lr', type=str) - args = parser.parse_args() - - content = '*' * 1024 * 1024 * 1025 # 生成1025MB文件 - - output_path = args.output_path - result_file_path = os.path.join(output_path, 'eval_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(result_file_path, flag, mode), 'w') as file: - json.dump(content, file) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/task/evaluate_infer/resource/boot_file_with_runtime_error.py b/test/unit_test/task/evaluate_infer/resource/boot_file_with_runtime_error.py deleted file mode 100644 index be79ac3..0000000 --- a/test/unit_test/task/evaluate_infer/resource/boot_file_with_runtime_error.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - -raise RuntimeError diff --git a/test/unit_test/task/evaluate_infer/resource/boot_file_without_generating_result_json.py b/test/unit_test/task/evaluate_infer/resource/boot_file_without_generating_result_json.py deleted file mode 100644 index 92c2d2b..0000000 --- a/test/unit_test/task/evaluate_infer/resource/boot_file_without_generating_result_json.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import argparse -import logging - -logging.getLogger().setLevel(logging.INFO) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--lr', type=str) - args = parser.parse_args() - logging.info(args) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/task/evaluate_infer/resource/normal_evaluate_boot_file.py b/test/unit_test/task/evaluate_infer/resource/normal_evaluate_boot_file.py deleted file mode 100644 index bfcc894..0000000 --- a/test/unit_test/task/evaluate_infer/resource/normal_evaluate_boot_file.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - -import os -import stat -import json -import argparse - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--lr', type=str) - args = parser.parse_args() - - content = 'evaluate task success' - - output_path = args.output_path - result_file_path = os.path.join(output_path, 'eval_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(result_file_path, flag, mode), 'w') as file: - json.dump(content, file) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/task/evaluate_infer/resource/normal_infer_boot_file.py b/test/unit_test/task/evaluate_infer/resource/normal_infer_boot_file.py deleted file mode 100644 index a70ec2d..0000000 --- a/test/unit_test/task/evaluate_infer/resource/normal_infer_boot_file.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import os -import stat -import json -import argparse - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--data_path', type=str) - parser.add_argument('--output_path', type=str) - parser.add_argument('--ckpt_path', type=str) - parser.add_argument('--lr', type=str) - args = parser.parse_args() - - content = 'infer task success' - - output_path = args.output_path - result_file_path = os.path.join(output_path, 'infer_result.json') - - flag = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - mode = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - with os.fdopen(os.open(result_file_path, flag, mode), 'w') as file: - json.dump(content, file) - - -if __name__ == '__main__': - main() diff --git a/test/unit_test/task/evaluate_infer/test.sh b/test/unit_test/task/evaluate_infer/test.sh deleted file mode 100644 index 40edb1c..0000000 --- a/test/unit_test/task/evaluate_infer/test.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -for (i=1;i<=1000;i++) -do - pytest test_evaluate_infer_task.py -done \ No newline at end of file diff --git a/test/unit_test/task/evaluate_infer/test_evaluate_infer_task.py b/test/unit_test/task/evaluate_infer/test_evaluate_infer_task.py deleted file mode 100644 index d5f2823..0000000 --- a/test/unit_test/task/evaluate_infer/test_evaluate_infer_task.py +++ /dev/null @@ -1,517 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import os -import json -import logging -import shutil -import unittest -import unittest.mock as mock -import pytest -import yaml -from mindpet.utils.constants import DEFAULT_FLAGS, DEFAULT_MODES -from mindpet.task.evaluate_infer.evaluate_infer_task import EvaluateInferTask -from mindpet.utils.exceptions import ReadYamlFileError, CreateProcessError, TaskError, MonitorProcessRspError, \ - PathRightEscalationError, UnexpectedError - -logging.getLogger().setLevel(logging.INFO) - - -class TestEvaluateInferTask(unittest.TestCase): - root_path = None - data_path = None - output_path = None - ckpt_path = None - model_config_path = None - evaluate_boot_file_path = None - infer_boot_file_path = None - - @classmethod - def setUpClass(cls): - cls.root_path = os.path.join('/', 'tmp', 'evaluate_infer') - if not os.path.exists(cls.root_path): - os.makedirs(cls.root_path, exist_ok=True) - - cls.data_path = os.path.join(cls.root_path, 'data_path') - if not os.path.exists(cls.data_path): - os.makedirs(cls.data_path) - - cls.output_path = os.path.join(cls.root_path, 'output_path') - if not os.path.exists(cls.output_path): - os.makedirs(cls.output_path) - - cls.ckpt_path = os.path.join(cls.root_path, 'ckpt_path') - if not os.path.exists(cls.ckpt_path): - os.makedirs(cls.ckpt_path) - - cls.model_config_path = os.path.join(cls.root_path, 'model_config.yaml') - model_config_content = {'params': {'lr': '1e-4'}} - with os.fdopen(os.open(cls.model_config_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - yaml.dump(model_config_content, file) - - src_evaluate_boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/normal_evaluate_boot_file.py') - cls.evaluate_boot_file_path = os.path.join(cls.root_path, 'normal_evaluate_boot_file.py') - - src_infer_boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/normal_infer_boot_file.py') - cls.infer_boot_file_path = os.path.join(cls.root_path, 'normal_infer_boot_file.py') - - 
shutil.copyfile(src_evaluate_boot_file_path, cls.evaluate_boot_file_path) - shutil.copyfile(src_infer_boot_file_path, cls.infer_boot_file_path) - - @classmethod - def tearDownClass(cls): - if os.path.exists(cls.model_config_path): - os.remove(cls.model_config_path) - - if os.path.exists(cls.evaluate_boot_file_path): - os.remove(cls.evaluate_boot_file_path) - - if os.path.exists(cls.infer_boot_file_path): - os.remove(cls.infer_boot_file_path) - - if os.path.exists(cls.data_path): - shutil.rmtree(cls.data_path) - - if os.path.exists(cls.output_path): - shutil.rmtree(cls.output_path) - - if os.path.exists(cls.ckpt_path): - shutil.rmtree(cls.ckpt_path) - - if os.path.exists(cls.root_path): - shutil.rmtree(cls.root_path) - - def test_evaluate_infer_task_init_with_invalid_task_type(self): - """ - 测试EvaluateInferTask输入不存在task_type的情况 - """ - logging.info('Start test_evaluate_infer_task_init_with_invalid_task_type.') - - with self.assertRaises(ValueError): - EvaluateInferTask(task_type='others', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=self.evaluate_boot_file_path) - - logging.info('Finish test_evaluate_infer_task_init_with_invalid_task_type.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.EvaluateInferTask._process_param_and_command') - def test_evaluate_infer_task_init_with_process_param_and_command_throw_keyboard_interrupt(self, mock_func): - """ - 测试EvaluateInferTask在组装参数时触发KeyboardInterrupt异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_init_with_process_param_and_command_throw_keyboard_interrupt.') - - mock_func.side_effect = KeyboardInterrupt - with self.assertRaises(KeyboardInterrupt): - self._get_default_evaluate_task() - - logging.info('Finish test_evaluate_infer_task_init_with_process_param_and_command_throw_keyboard_interrupt.') - - def test_evaluate_task_init_with_invalid_model_config_path(self): - """ 
- 测试EvaluateInferTask在接收不存在model_config_path时的情况 - """ - logging.info('Start test_evaluate_task_init_without_model_config_path.') - - model_config_path = os.path.join(self.root_path, 'temp_model_config.yaml') - - with self.assertRaises(ReadYamlFileError): - EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=model_config_path, - boot_file_path=self.evaluate_boot_file_path) - - if os.path.exists(model_config_path): - os.remove(model_config_path) - - logging.info('Finish test_evaluate_task_init_without_model_config_path.') - - @mock.patch('subprocess.Popen') - def test_evaluate_infer_task_with_create_process_throw_keyboard_interrupt(self, mock_func): - """ - 测试EvaluateInferTask在创建子进程时抛出KeyboardInterrupt异常时的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_create_process_throw_keyboard_interrupt.') - - mock_func.side_effect = KeyboardInterrupt - task = self._get_default_evaluate_task() - with self.assertRaises(KeyboardInterrupt): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_create_process_throw_keyboard_interrupt.') - - @mock.patch('subprocess.Popen') - def test_evaluate_infer_task_with_create_process_throw_exception(self, mock_func): - """ - 测试EvaluateInferTask在创建子进程时抛出Exception类型异常时的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_create_process_throw_keyboard_interrupt.') - - mock_func.side_effect = RuntimeError - task = self._get_default_evaluate_task() - with self.assertRaises(CreateProcessError): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_create_process_throw_keyboard_interrupt.') - - @mock.patch('subprocess.Popen') - def test_evaluate_infer_task_with_create_process_throw_exception_containing_err_msg(self, mock_func): - """ - 测试EvaluateInferTask在创建子进程时抛出Exception类型异常时的情况, 其中异常包含信息 - :param mock_func: mock方法 - """ - logging.info('Start 
test_evaluate_infer_task_with_create_process_throw_exception_containing_err_msg.') - - mock_func.side_effect = RuntimeError('runtime error occurred!!!') - task = self._get_default_evaluate_task() - with self.assertRaises(CreateProcessError): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_create_process_throw_exception_containing_err_msg.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.monitor_process_rsp_code') - def test_evaluate_infer_task_with_monitor_process_rsp_code_throw_keyboard_interrupt(self, mock_func): - """ - 测试EvaluateInferTask在监测子任务状态时出现KeyboardInterrupt异常时的处理 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_monitor_process_rsp_code_throw_keyboard_interrupt.') - - mock_func.side_effect = KeyboardInterrupt - task = self._get_default_evaluate_task() - with self.assertRaises(KeyboardInterrupt): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_monitor_process_rsp_code_throw_keyboard_interrupt.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.monitor_process_rsp_code') - def test_evaluate_infer_task_with_monitor_process_rsp_code_throw_exception_containing_msg(self, mock_func): - """ - 测试EvaluateInferTask在监测子任务状态时出现Exception(包含异常信息)类型异常时的处理 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_monitor_process_rsp_code_throw_exception_containing_msg.') - - mock_func.side_effect = RuntimeError('monitor process rsp code raises runtime error') - task = self._get_default_evaluate_task() - with self.assertRaises(MonitorProcessRspError): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_monitor_process_rsp_code_throw_exception_containing_msg.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.monitor_process_rsp_code') - def test_evaluate_infer_task_with_monitor_process_rsp_code_throw_exception(self, mock_func): - """ - 测试EvaluateInferTask在监测子任务状态时出现Exception(不包含异常信息)类型异常时的处理 - :param mock_func: 
mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_monitor_process_rsp_code_throw_exception.') - - mock_func.side_effect = RuntimeError - task = self._get_default_evaluate_task() - with self.assertRaises(MonitorProcessRspError): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_monitor_process_rsp_code_throw_exception.') - - def test_evaluate_infer_task_with_abnormal_exit_code(self): - """ - 测试EvaluateInferTask在模型代码意外退出时的状态 - """ - logging.info('Start test_evaluate_infer_task_with_abnormal_exit_code.') - - boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file_with_runtime_error.py') - - task = EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=boot_file_path) - - with self.assertRaises(TaskError): - task.start() - - logging.info('Finish test_evaluate_infer_task_with_abnormal_exit_code.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.read_json_file') - def test_evaluate_task_with_read_result_json_throw_json_decode_error(self, mock_func): - """ - 测试EvaluateInferTask在校验eval_result.json时读取json文件抛出JsonDecodeError异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_task_with_read_result_json_throw_json_decode_error.') - - mock_func.side_effect = json.JSONDecodeError - task = self._get_default_evaluate_task() - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), 'File eval_result.json should follow JSON format.') - - logging.info('Finish test_evaluate_task_with_read_result_json_throw_json_decode_error.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.read_json_file') - def test_evaluate_task_with_read_result_json_throw_exception(self, mock_func): - """ - 测试EvaluateInferTask在校验eval_result.json时读取json文件抛出Exception类型异常的情况 - :param mock_func: mock方法 - 
""" - logging.info('Start test_evaluate_task_with_read_result_json_throw_exception.') - - mock_func.side_effect = RuntimeError('read json file raise runtime error') - task = self._get_default_evaluate_task() - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), - 'An error occurred during reading eval_result.json: read json file raise runtime error') - - logging.info('Finish test_evaluate_task_with_read_result_json_throw_exception.') - - def test_evaluate_task_json_check_without_accessing_json_file(self): - """ - 测试EvaluateInferTask在模型代码不生成result_json的情况 - """ - logging.info('Start test_evaluate_task_json_check_without_accessing_json_file.') - - boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file_without_generating_result_json.py') - - task = EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=boot_file_path) - - result = task.start() - - self.assertEqual(result.get('status'), -1) - - logging.info('Finish test_evaluate_task_json_check_without_accessing_json_file.') - - def test_evaluate_infer_task_json_check_with_empty_json_file(self): - """ - 测试EvaluateInferTask在模型生成result_json内容为空的情况 - """ - logging.info('Start test_evaluate_infer_task_json_check_with_empty_json_file.') - - boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file_with_empty_json.py') - - task = EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=boot_file_path) - - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), 'File eval_result.json is empty.') - self.assertEqual(result.get('task_result'), '') - - 
logging.info('Finish test_evaluate_infer_task_json_check_with_empty_json_file.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.EvaluateInferTask._check_task_result') - def test_evaluate_infer_task_with_result_json_containing_right_escalation_risk(self, mock_func): - """ - 测试EvaluateInferTask在校验eval_result.json时发现存在提权风险时的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_result_json_containing_right_escalation_risk.') - - mock_func.side_effect = PathRightEscalationError - task = self._get_default_evaluate_task() - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), 'Permission denied when reading eval_result.json.') - - logging.info('Finish test_evaluate_infer_task_with_result_json_containing_right_escalation_risk.') - - def test_evaluate_infer_task_with_oversize_result_json(self): - """ - 测试EvaluateInferTask在模型生成过大result_json文件的情况 - """ - logging.info('Start test_evaluate_infer_task_with_oversize_result_json.') - - boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file_with_oversize_result_json.py') - - task = EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=boot_file_path) - - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), 'File eval_result.json is too large.') - - logging.info('Finish test_evaluate_infer_task_with_oversize_result_json.') - - def test_evaluate_infer_task_with_link_result_json(self): - """ - 测试EvaluateInferTask在模型生成result_json路径为软链接时的情况 - """ - logging.info('Start test_evaluate_infer_task_with_link_result_json.') - - boot_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'resource/boot_file_with_link_result_json.py') - - task = EvaluateInferTask(task_type='evaluate', 
- data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=boot_file_path) - - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), 'Detect link path, reject reading file: eval_result.json.') - - logging.info('Finish test_evaluate_infer_task_with_link_result_json.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.EvaluateInferTask._check_task_result') - def test_evaluate_infer_task_with_value_error_result_json(self, mock_func): - """ - 测试EvaluateInferTask在模型生成result_json时触发ValueError时的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_value_error_result_json.') - - mock_func.side_effect = ValueError - - task = self._get_default_evaluate_task() - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), 'Invalid file: eval_result.json.') - - logging.info('Finish test_evaluate_infer_task_with_value_error_result_json.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.EvaluateInferTask._check_task_result') - def test_evaluate_infer_task_with_exception_result_json(self, mock_func): - """ - 测试EvaluateInferTask在模型生成result_json时触发Exception时的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_exception_result_json.') - - mock_func.side_effect = RuntimeError('result.json runtime error!!!') - - task = self._get_default_evaluate_task() - result = task.start() - - self.assertEqual(result.get('status'), -1) - self.assertEqual(result.get('error_message'), - 'An error occurred during reading eval_result.json: result.json runtime error!!!') - - logging.info('Finish test_evaluate_infer_task_with_exception_result_json.') - - def test_evaluate_infer_task_with_none_model_config_path(self): - """ - 测试EvaluateInferTask在不配置model_config_path的情况 - """ - logging.info('Start 
test_evaluate_infer_task_with_none_model_config_path.') - - task = EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=None, - boot_file_path=self.evaluate_boot_file_path) - - result = task.start() - - self.assertEqual(result.get('status'), 0) - - logging.info('Finish test_evaluate_infer_task_with_none_model_config_path.') - - @mock.patch('tk.task.evaluate_infer.evaluate_infer_task.read_yaml_file') - def test_evaluate_infer_task_with_read_model_config_yaml_throw_exception(self, mock_func): - """ - 测试EvaluateInferTask在从model_config.yaml中解析params参数时发生Exception类型异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_evaluate_infer_task_with_read_model_config_yaml_throw_exception.') - - mock_func.side_effect = RuntimeError - with self.assertRaises(UnexpectedError): - self._get_default_evaluate_task() - - logging.info('Finish test_evaluate_infer_task_with_read_model_config_yaml_throw_exception.') - - def test_evaluate_task(self): - """ - 正常执行evaluate任务 - """ - logging.info('Start test_evaluate_task.') - - task = self._get_default_evaluate_task() - result = task.start() - - self.assertEqual(result.get('status'), 0) - self.assertEqual(result.get('task_result'), 'evaluate task success') - - logging.info('Finish test_evaluate_task.') - - def test_infer_task(self): - """ - 正常执行infer任务 - """ - logging.info('Start test_infer_task.') - - task = self._get_default_infer_task() - result = task.start() - - self.assertEqual(result.get('status'), 0) - self.assertEqual(result.get('task_result'), 'infer task success') - - logging.info('Finish test_infer_task.') - - def _get_default_evaluate_task(self): - """ - 获取默认的evaluate任务对象 - :return: 任务对象 - """ - return EvaluateInferTask(task_type='evaluate', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=self.evaluate_boot_file_path) - - def 
_get_default_infer_task(self): - """ - 获取默认的infer任务对象 - :return: 任务对象 - """ - return EvaluateInferTask(task_type='infer', - data_path=self.data_path, - output_path=self.output_path, - ckpt_path=self.ckpt_path, - model_config_path=self.model_config_path, - boot_file_path=self.infer_boot_file_path) - - -if __name__ == '__main__': - pytest.main(['-s', os.path.abspath(__file__)]) diff --git a/test/unit_test/task/evaluate_infer/test_result_file_check.py b/test/unit_test/task/evaluate_infer/test_result_file_check.py deleted file mode 100644 index fa23041..0000000 --- a/test/unit_test/task/evaluate_infer/test_result_file_check.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - -import logging -import os -import shutil -import stat -import json -import unittest -import pytest -from mindpet.utils.constants import SPACE_CHARACTER, PATH_MODE_LIMIT -from mindpet.security.param_check.option_check_utils import PathContentCheckParam -from mindpet.task.evaluate_infer.result_file_check import ResultFileCheckParam, ResultFileCheck -from mindpet.utils.exceptions import LinkPathError, FileOversizeError, PathRightEscalationError - - -class TestEvaluateResultCheck(unittest.TestCase): - def test_path_existence_check(self): - """ - 测试evaluate/infer结果json文件校验, 路径存在性校验 - """ - logging.info('Start test_path_existence_check.') - - test_value = os.path.join('/', 'tmp', 'evaluate_result.json') - - base_whitelist_mode = 'ALL' - extra_whitelist = ['.', '/', '-', '_', SPACE_CHARACTER] - - eval_result_check = ResultFileCheck(option_name='eval_path_existence_check', - option_value=test_value) - - eval_path_content_check_param = PathContentCheckParam(base_whitelist_mode=base_whitelist_mode, - extra_whitelist=extra_whitelist) - - eval_check_param = ResultFileCheckParam(path_content_check_param=eval_path_content_check_param, - mode=PATH_MODE_LIMIT, - path_including_file=True, - 
force_quit=True, - quiet=False) - - with self.assertRaises(FileNotFoundError): - eval_result_check.check(eval_check_param) - - logging.info('Finish test_path_existence_check.') - - def test_link_path_check(self): - """ - 测试evaluate/infer结果json文件校验, 软链接路径校验 - """ - logging.info('Start test_link_path_check.') - - root_path = os.path.join('/', 'tmp', 'evaluate_link_path_check') - - if not os.path.exists(root_path): - os.makedirs(root_path) - - source_file_path = os.path.join(root_path, 'source_evaluate_result.json') - - if os.path.exists(source_file_path): - os.remove(source_file_path) - - flags = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - modes = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - file_content = json.dumps({'test_key': 'test_value'}) - - with os.fdopen(os.open(source_file_path, flags, modes), 'w') as file: - file.write(file_content) - - target_file_path = os.path.join(root_path, 'target_evaluate_result.json') - - if os.path.exists(target_file_path): - os.remove(target_file_path) - - os.symlink(source_file_path, target_file_path) - - base_whitelist_mode = 'ALL' - extra_whitelist = ['.', '/', '-', '_', SPACE_CHARACTER] - - eval_result_check = ResultFileCheck(option_name='eval_path_existence_check', - option_value=target_file_path) - - eval_path_content_check_param = PathContentCheckParam(base_whitelist_mode=base_whitelist_mode, - extra_whitelist=extra_whitelist) - - eval_check_param = ResultFileCheckParam(path_content_check_param=eval_path_content_check_param, - mode=PATH_MODE_LIMIT, - path_including_file=True, - force_quit=True, - quiet=False) - - with self.assertRaises(LinkPathError): - eval_result_check.check(eval_check_param) - - if os.path.exists(root_path): - shutil.rmtree(root_path) - - logging.info('Finish test_link_path_check.') - - def test_path_right_escalation_check(self): - """ - 测试evaluate/infer结果json文件校验, 路径权限提升校验 - """ - logging.info('Start test_path_right_escalation_check.') - - root_path = os.path.join('/', 'tmp', 
'evaluate_path_right_escalation_check') - test_value = os.path.join(root_path, 'evaluate_result.json') - - if not os.path.exists(root_path): - os.makedirs(root_path) - - if os.path.exists(test_value): - os.remove(test_value) - - flags = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - modes = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - file_content = json.dumps({'test_key': 'test_value'}) - - with os.fdopen(os.open(test_value, flags, modes), 'w') as file: - file.write(file_content) - - mode_777 = 0o777 - os.chmod(test_value, mode_777) - - base_whitelist_mode = 'ALL' - extra_whitelist = ['.', '/', '-', '_', SPACE_CHARACTER] - - eval_result_check = ResultFileCheck(option_name='evaluate_path_right_escalation_check', - option_value=test_value) - - eval_path_content_check_param = PathContentCheckParam(base_whitelist_mode=base_whitelist_mode, - extra_whitelist=extra_whitelist) - - eval_check_param = ResultFileCheckParam(path_content_check_param=eval_path_content_check_param, - mode=PATH_MODE_LIMIT, - path_including_file=True, - force_quit=True, - quiet=False) - - with self.assertRaises(PathRightEscalationError): - eval_result_check.check(eval_check_param) - - if os.path.exists(root_path): - shutil.rmtree(root_path) - - logging.info('Finish test_path_right_escalation_check.') - - def test_file_size_check(self): - """ - 测试evaluate/infer结果json文件校验, 文件大小校验 - """ - logging.info('Start test_file_size_check.') - - root_path = os.path.join('/', 'tmp', 'evaluate_path_right_escalation_check') - test_value = os.path.join(root_path, 'evaluate_result.json') - - if not os.path.exists(root_path): - os.makedirs(root_path) - - if os.path.exists(test_value): - os.remove(test_value) - - flags = os.O_RDWR | os.O_CREAT # 允许读写, 文件不存在时新建 - modes = stat.S_IWUSR | stat.S_IRUSR # 所有者读写 - - content_info = '*' * 1024 * 1024 * 1024 - - file_content = json.dumps({'test_key': content_info}) - - with os.fdopen(os.open(test_value, flags, modes), 'w') as file: - file.write(file_content) - - mode_750 = 0o750 - 
os.chmod(test_value, mode_750) - - base_whitelist_mode = 'ALL' - extra_whitelist = ['.', '/', '-', '_', SPACE_CHARACTER] - - eval_result_check = ResultFileCheck(option_name='evaluate_file_size_check', - option_value=test_value) - - eval_path_content_check_param = PathContentCheckParam(base_whitelist_mode=base_whitelist_mode, - extra_whitelist=extra_whitelist) - - eval_check_param = ResultFileCheckParam(path_content_check_param=eval_path_content_check_param, - mode=PATH_MODE_LIMIT, - path_including_file=True, - force_quit=True, - quiet=False) - - with self.assertRaises(FileOversizeError): - eval_result_check.check(eval_check_param) - - if os.path.exists(root_path): - shutil.rmtree(root_path) - - logging.info('Finish test_file_size_check.') - - -if __name__ == '__main__': - pytest.main(['-s', os.path.abspath(__file__)]) diff --git a/test/unit_test/task/finetune/test_finetune_options_check.py b/test/unit_test/task/finetune/test_finetune_options_check.py deleted file mode 100644 index 9b84a32..0000000 --- a/test/unit_test/task/finetune/test_finetune_options_check.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import logging -import os -import unittest -import pytest -from mindpet.task.finetune.finetune_options_check import FinetuneOptionsCheck, FinetuneOptionsCheckParam -from mindpet.security.param_check.option_check_utils import PathLengthCheckParam, PathContentCheckParam -from mindpet.utils.constants import DEFAULT_PATH_LEN_MIN_LIMIT, DEFAULT_PATH_LEN_MAX_LIMIT, PATH_MODE_LIMIT, \ - DEFAULT_FLAGS, DEFAULT_MODES, DEFAULT_FILE_LEN_MIN_LIMIT, DEFAULT_FILE_LEN_MAX_LIMIT - - -def base_check(disk_free_space_check): - """ - finetune接口参数通用校验 - :param disk_free_space_check: 是否启动路径所在磁盘剩余空间校验 - :return: 校验结果对象 - """ - path = os.path.join('/', 'tmp', 'finetune_options_check') - - if not os.path.exists(path): - os.makedirs(path, exist_ok=True) - - os.chmod(path, 0o750) - - file_path = os.path.join(path, 'finetune_options_check.txt') - - with os.fdopen(os.open(file_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - file.write('finetune options check file content.') - - option = FinetuneOptionsCheck( - option_name='test_finetune_options_check', option_value=file_path, disk_space_check=disk_free_space_check) - - path_length_check_param = PathLengthCheckParam(path_min_limit=DEFAULT_PATH_LEN_MIN_LIMIT, - path_max_limit=DEFAULT_PATH_LEN_MAX_LIMIT, - file_min_limit=DEFAULT_FILE_LEN_MIN_LIMIT, - file_max_limit=DEFAULT_FILE_LEN_MAX_LIMIT) - - path_content_check_param = PathContentCheckParam(base_whitelist_mode='ALL', - extra_whitelist=['/', '_', '.']) - - check_param = FinetuneOptionsCheckParam(path_length_check_param=path_length_check_param, - path_content_check_param=path_content_check_param, - mode=PATH_MODE_LIMIT, - path_including_file=True, - force_quit=True, - quiet=True) - - option.check(check_param) - - if os.path.exists(file_path): - os.remove(file_path) - - if os.path.exists(path): - os.rmdir(path) - - return option - - -class TestFinetuneOptionsCheck(unittest.TestCase): - def test_finetune_options_check_with_disk_free_space_check(self): - """ - 测试finetune接口参数通用校验, 
在开启路径剩余空间校验的情况 - """ - logging.info('Start test_finetune_options_check_with_disk_free_space_check.') - base_check(disk_free_space_check=True) - logging.info('Finish test_finetune_options_check_with_disk_free_space_check.') - - def test_finetune_options_check_without_disk_free_space_check(self): - """ - 测试finetune接口参数通用校验, 在不开启路径剩余空间校验的情况 - """ - logging.info('Start test_finetune_options_check_without_disk_free_space_check.') - base_check(disk_free_space_check=False) - logging.info('Finish test_finetune_options_check_without_disk_free_space_check.') - - -if __name__ == '__main__': - pytest.main(['-s', os.path.abspath(__file__)]) diff --git a/test/unit_test/task/finetune/test_finetune_task.py b/test/unit_test/task/finetune/test_finetune_task.py deleted file mode 100644 index 9c53ff7..0000000 --- a/test/unit_test/task/finetune/test_finetune_task.py +++ /dev/null @@ -1,402 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- -import logging -import os -import shutil -import unittest -from unittest import mock -import pytest -import yaml -from mindpet.task.finetune.finetune_task import FinetuneTask -from mindpet.utils.constants import DEFAULT_FLAGS, DEFAULT_MODES -from mindpet.utils.exceptions import PathLoopError, ReadYamlFileError, ModelConfigKeysInfoError, \ - ModelConfigParamsInfoError, TaskError, CreateProcessError, MonitorProcessRspError, UnexpectedError - -logging.getLogger().setLevel(logging.INFO) - - -class TestFinetuneTask(unittest.TestCase): - root_path = None - data_path = None - output_path = None - pretrained_model_path = None - model_config_path = None - boot_file_path = None - - @classmethod - def setUpClass(cls): - cls.root_path = os.path.join('/', 'tmp', 'finetune_task') - if not os.path.exists(cls.root_path): - os.makedirs(cls.root_path, exist_ok=True) - - cls.data_path = os.path.join(cls.root_path, 'data_path') - if not os.path.exists(cls.data_path): - os.makedirs(cls.data_path, exist_ok=True) - - cls.output_path = os.path.join(cls.root_path, 'output_path') - if not os.path.exists(cls.output_path): - os.makedirs(cls.output_path, exist_ok=True) - - cls.pretrained_model_path = os.path.join(cls.root_path, 'pretrained_model_files') - if not os.path.exists(cls.pretrained_model_path): - os.makedirs(cls.pretrained_model_path, exist_ok=True) - - cls.model_config_path = os.path.join(cls.root_path, 'model_config.yaml') - model_config_content = {'params': {'lr': '1e-4'}, 'freeze': {'block1': 'layer1'}} - with os.fdopen(os.open(cls.model_config_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - yaml.dump(model_config_content, file) - - cls.boot_file_path = os.path.join(cls.root_path, 'boot.py') - with os.fdopen(os.open(cls.boot_file_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - file.write('print("enter into boot.py process.")') - - @classmethod - def tearDownClass(cls): - if os.path.exists(cls.model_config_path): - os.remove(cls.model_config_path) - - if 
os.path.exists(cls.boot_file_path): - os.remove(cls.boot_file_path) - - if os.path.exists(cls.data_path): - shutil.rmtree(cls.data_path) - - if os.path.exists(cls.output_path): - shutil.rmtree(cls.output_path) - - if os.path.exists(cls.pretrained_model_path): - os.rmdir(cls.pretrained_model_path) - - if os.path.exists(cls.root_path): - shutil.rmtree(cls.root_path) - - def test_finetune_task_init_with_in_out_loop_path(self): - """ - 测试微调任务构造方法中, 传入了循环嵌套路径的情况 - """ - logging.info('Start test_finetune_task_init_with_in_out_loop_path.') - - output_path = os.path.join(self.data_path, 'inner_outputs') - - if not os.path.exists(output_path): - os.makedirs(output_path, exist_ok=True) - - with self.assertRaises(PathLoopError): - FinetuneTask(data_path=self.data_path, - output_path=output_path, - pretrainde_model_path=self.pretrained_model_path, - model_config_path=self.model_config_path, - boot_file_path=self.boot_file_path) - - if os.path.exists(output_path): - os.rmdir(output_path) - - logging.info('Finish test_finetune_task_init_with_in_out_loop_path.') - - def test_finetune_task_init_with_invalid_model_config_path(self): - """ - 测试微调任务构造方法中, 参数model_config_path传入不合理类型参数的情况 - """ - logging.info('Start test_finetune_task_init_with_invalid_model_config_path.') - - model_config_path = os.path.join(self.root_path, 'temp_model_config.yaml') - - with self.assertRaises(ReadYamlFileError): - FinetuneTask(data_path=self.data_path, - output_path=self.output_path, - pretrainde_model_path=self.pretrained_model_path, - model_config_path=model_config_path, - boot_file_path=self.boot_file_path) - - if os.path.exists(model_config_path): - os.remove(model_config_path) - - logging.info('Finish test_finetune_task_init_with_invalid_model_config_path.') - - def test_finetune_task_init_with_invalid_key_in_model_config_path(self): - """ - 测试微调任务构造方法中, 参数model_config_path传入不合理类型参数的情况 - """ - logging.info('Start test_finetune_task_init_with_invalid_key_in_model_config_path.') - - yaml_content = 
{'invalid_key': 'value'} - - model_config_path = os.path.join(self.root_path, 'temp_model_config.yaml') - - with os.fdopen(os.open(model_config_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - yaml.dump(yaml_content, file) - - with self.assertRaises(ModelConfigKeysInfoError): - FinetuneTask(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=model_config_path, - boot_file_path=self.boot_file_path) - - if os.path.exists(model_config_path): - os.remove(model_config_path) - - logging.info('Finish test_finetune_task_init_with_invalid_key_in_model_config_path.') - - def test_finetune_task_init_with_none_params_in_model_config_path(self): - """ - 测试微调任务构造方法中, 参数model_config_path文件中, params部分信息为None的情况 - """ - logging.info('Start test_finetune_task_init_with_none_params_in_model_config_path.') - - yaml_content = {'params': None} - - model_config_path = os.path.join(self.root_path, 'temp_model_config.yaml') - - with os.fdopen(os.open(model_config_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - yaml.dump(yaml_content, file) - - with self.assertRaises(ModelConfigParamsInfoError): - FinetuneTask(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=model_config_path, - boot_file_path=self.boot_file_path) - - if os.path.exists(model_config_path): - os.remove(model_config_path) - - logging.info('Finish test_finetune_task_init_with_none_params_in_model_config_path.') - - def test_finetune_task_init_with_invalid_params_config_in_model_config_path(self): - """ - 测试微调任务构造方法中, 参数model_config_path文件中, params部分包含不合理信息的情况 - """ - logging.info('Start test_finetune_task_init_with_invalid_params_config_in_model_config_path.') - - yaml_content = {'params': {'--key': 'value'}} - - model_config_path = os.path.join(self.root_path, 'temp_model_config.yaml') - - with os.fdopen(os.open(model_config_path, DEFAULT_FLAGS, DEFAULT_MODES), 
'w') as file: - yaml.dump(yaml_content, file) - - with self.assertRaises(ModelConfigParamsInfoError): - FinetuneTask(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=model_config_path, - boot_file_path=self.boot_file_path) - - if os.path.exists(model_config_path): - os.remove(model_config_path) - - logging.info('Finish test_finetune_task_init_with_invalid_params_config_in_model_config_path.') - - @mock.patch('tk.task.finetune.finetune_task.read_yaml_file') - def test_finetune_task_init_with_read_model_config_yaml_throw_exception(self, mock_func): - """ - 测试FinetuneTask在解析model_config.yaml文件params时触发Exception类型异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_init_with_read_model_config_yaml_throw_exception.') - - mock_func.side_effect = RuntimeError - with self.assertRaises(UnexpectedError): - self._get_default_finetune_task() - - logging.info('Finish test_finetune_task_init_with_read_model_config_yaml_throw_exception.') - - def test_finetune_task_init_without_model_config_path(self): - """ - 测试微调任务构造方法中, 不传model_config_path的情况 - """ - logging.info('Start test_finetune_task_init_without_model_config_path.') - - finetune_task = FinetuneTask(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=None, - boot_file_path=self.boot_file_path) - - param_dict = dict() - command_params = finetune_task.command_params - - for i in range(0, len(command_params), 2): - param_dict[command_params[i]] = command_params[i + 1] - - self.assertNotIn('--lr', param_dict.keys()) - - logging.info('Finish test_finetune_task_init_without_model_config_path.') - - @mock.patch('tk.task.finetune.finetune_task.FinetuneTask._process_param_and_command') - def test_finetune_task_init_with_process_param_and_command_throw_keyboard_interrupt(self, mock_func): - """ - 测试微调任务构造方法中, 组装子进程命令时抛KeyboardInterrupt异常的情况 - :param 
mock_func: mock方法 - """ - logging.info('Start test_finetune_task_init_with_process_param_and_command_throw_keyboard_interrupt.') - - mock_func.side_effect = KeyboardInterrupt - with self.assertRaises(KeyboardInterrupt): - self._get_default_finetune_task() - - logging.info('Finish test_finetune_task_init_with_process_param_and_command_throw_keyboard_interrupt.') - - def test_finetune_task_init_with_normal_params_and_freeze_configs_in_model_config_path(self): - """ - 测试微调任务构造方法中, model_config_path文件中params和freeze部分内容均正常的情况 - """ - logging.info('Start test_finetune_task_init_with_normal_params_and_freeze_configs_in_model_config_path.') - - finetune_task = self._get_default_finetune_task() - - param_dict = dict() - command_params = finetune_task.command_params - - for i in range(0, len(command_params), 2): - param_dict[command_params[i]] = command_params[i + 1] - - self.assertIn(self.output_path, param_dict.get('--output_path')) - self.assertEqual(param_dict.get('--data_path'), self.data_path) - self.assertEqual(param_dict.get('--pretrained_model_path'), self.pretrained_model_path) - self.assertEqual(param_dict.get('--lr'), '1e-4') - self.assertEqual(param_dict.get('--advanced_config'), self.model_config_path) - - logging.info('Finish test_finetune_task_init_with_normal_params_and_freeze_configs_in_model_config_path.') - - def test_finetune_task_with_wrong_subtask_exit_code(self): - """ - 测试微调任务执行过程中, 子进程任务返回错误退出码的情况 - """ - logging.info('Start test_finetune_task_with_wrong_subtask_exit_code.') - - if os.path.exists(self.boot_file_path): - os.remove(self.boot_file_path) - - with os.fdopen(os.open(self.boot_file_path, DEFAULT_FLAGS, DEFAULT_MODES), 'w') as file: - file.write('raise RuntimeError') - - finetune_task = self._get_default_finetune_task() - - with self.assertRaises(TaskError): - finetune_task.start() - - logging.info('Finish test_finetune_task_with_wrong_subtask_exit_code.') - - def test_finetune_task_with_valid_subtask_exit_code(self): - """ - 测试微调任务执行过程中, 
子进程任务返回正常退出码的情况 - """ - logging.info('Start test_finetune_task_with_valid_subtask_exit_code.') - - finetune_task = self._get_default_finetune_task() - - self.assertTrue(finetune_task.start()) - - logging.info('Finish test_finetune_task_with_valid_subtask_exit_code.') - - @mock.patch('subprocess.Popen') - def test_finetune_task_with_create_subprocess_command_throw_keyboard_interrupt(self, mock_func): - """ - 测试微调任务执行过程中, 创建子进程对象时抛出KeyboardInterrupt异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_with_create_subprocess_command_throw_keyboard_interrupt.') - - finetune_task = self._get_default_finetune_task() - - mock_func.side_effect = KeyboardInterrupt - with self.assertRaises(KeyboardInterrupt): - finetune_task.start() - - logging.info('Finish test_finetune_task_with_create_subprocess_command_throw_keyboard_interrupt.') - - @mock.patch('subprocess.Popen') - def test_finetune_task_with_create_subprocess_command_throw_exception(self, mock_func): - """ - 测试微调任务执行过程中, 创建子进程对象时抛出Exception类型异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_with_create_subprocess_command_throw_exception.') - - finetune_task = self._get_default_finetune_task() - - mock_func.side_effect = RuntimeError - with self.assertRaises(CreateProcessError): - finetune_task.start() - - logging.info('Finish test_finetune_task_with_create_subprocess_command_throw_exception.') - - @mock.patch('subprocess.Popen') - def test_finetune_task_with_create_subprocess_command_throw_exception_containing_err_msg(self, mock_func): - """ - 测试微调任务执行过程中, 创建子进程对象时抛出Exception类型异常(包含错误信息)的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_with_create_subprocess_command_throw_exception_containing_err_msg.') - - finetune_task = self._get_default_finetune_task() - - mock_func.side_effect = RuntimeError('runtime error occurred!!!') - with self.assertRaises(CreateProcessError): - finetune_task.start() - - logging.info('Finish 
test_finetune_task_with_create_subprocess_command_throw_exception_containing_err_msg.') - - @mock.patch('tk.task.finetune.finetune_task.monitor_process_rsp_code') - def test_finetune_task_with_monitor_process_rsp_code_throw_keyboard_interrupt(self, mock_func): - """ - 测试FinetuneTask在监测子进程执行过程中出现KeyboardInterrupt异常的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_with_monitor_process_rsp_code_throw_keyboard_interrupt.') - - mock_func.side_effect = KeyboardInterrupt - task = self._get_default_finetune_task() - with self.assertRaises(KeyboardInterrupt): - task.start() - - logging.info('Finish test_finetune_task_with_monitor_process_rsp_code_throw_keyboard_interrupt.') - - @mock.patch('tk.task.finetune.finetune_task.monitor_process_rsp_code') - def test_finetune_task_with_monitor_process_rsp_code_throw_exception_containing_err_msg(self, mock_func): - """ - 测试FinetuneTask在监测子进程执行过程中出现Exception类型异常(含错误信息)的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_with_monitor_process_rsp_code_throw_exception_containing_err_msg.') - - mock_func.side_effect = RuntimeError('finetune task raise runtime error') - task = self._get_default_finetune_task() - with self.assertRaises(MonitorProcessRspError): - task.start() - - logging.info('Finish test_finetune_task_with_monitor_process_rsp_code_throw_exception_containing_err_msg.') - - @mock.patch('tk.task.finetune.finetune_task.monitor_process_rsp_code') - def test_finetune_task_with_monitor_process_rsp_code_throw_exception(self, mock_func): - """ - 测试FinetuneTask在监测子进程执行过程中出现Exception类型异常(不包含错误信息)的情况 - :param mock_func: mock方法 - """ - logging.info('Start test_finetune_task_with_monitor_process_rsp_code_throw_exception.') - - mock_func.side_effect = RuntimeError - task = self._get_default_finetune_task() - with self.assertRaises(MonitorProcessRspError): - task.start() - - logging.info('Finish test_finetune_task_with_monitor_process_rsp_code_throw_exception.') - - def 
_get_default_finetune_task(self): - """ - 构建默认的finetune任务 - :return: finetune任务对象 - """ - return FinetuneTask(data_path=self.data_path, - output_path=self.output_path, - pretrained_model_path=self.pretrained_model_path, - model_config_path=self.model_config_path, - boot_file_path=self.boot_file_path) - - -if __name__ == '__main__': - pytest.main(['-s', os.path.abspath(__file__)]) diff --git a/test/unit_test/utils/resource/timeout_monitor_task.py b/test/unit_test/utils/resource/timeout_monitor_task.py old mode 100644 new mode 100755 diff --git a/test/unit_test/utils/test_entrance_monitor.py b/test/unit_test/utils/test_entrance_monitor.py old mode 100644 new mode 100755 index c4781ec..faed5f3 --- a/test/unit_test/utils/test_entrance_monitor.py +++ b/test/unit_test/utils/test_entrance_monitor.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import os import unittest import logging diff --git a/test/unit_test/utils/test_io_utils.py b/test/unit_test/utils/test_io_utils.py old mode 100644 new mode 100755 index 97fcfe5..f957083 --- a/test/unit_test/utils/test_io_utils.py +++ b/test/unit_test/utils/test_io_utils.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. - +import sys +sys.path.append('.') import os import json import shutil diff --git a/test/unit_test/utils/test_task_utils.py b/test/unit_test/utils/test_task_utils.py old mode 100644 new mode 100755 index 9fddfaa..ad03947 --- a/test/unit_test/utils/test_task_utils.py +++ b/test/unit_test/utils/test_task_utils.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
- +import sys +sys.path.append('.') import os import logging import shutil @@ -52,7 +53,7 @@ def test_create_output_path_subdir_with_duplicate_uuid(self, mock_func): output_path = os.path.join('/', 'tmp', 'task_utils', 'outputs') - duplicate_output_full_path = os.path.join(output_path, 'TK_UUID1') + duplicate_output_full_path = os.path.join(output_path, 'MINDPET_UUID1') if not os.path.exists(duplicate_output_full_path): os.makedirs(duplicate_output_full_path) @@ -88,7 +89,7 @@ def test_create_output_path_subdir_with_uuid_when_makedirs_throw_exception(self, logging.info('Finish test_create_output_path_subdir_with_uuid_when_makedirs_throw_exception.') - @mock.patch('tk.utils.task_utils.ModelConfigKeysInfoError') + @mock.patch('mindpet.utils.task_utils.ModelConfigKeysInfoError') def test_model_config_keys_check_item_throw_attribute_error(self, mock_func): """ 测试model_config配置信息中的首层key名称合法性时, 出现content为None触发AttributeError的问题