diff --git a/api/commands.py b/api/commands.py index 3bf8bc0ecc45f2..db96fbae461f4d 100644 --- a/api/commands.py +++ b/api/commands.py @@ -411,7 +411,8 @@ def migrate_knowledge_vector_database(): try: click.echo( click.style( - f"Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.", + f"Start to created vector index with {len(documents)} documents of {segments_count}" + f" segments for dataset {dataset.id}.", fg="green", ) ) diff --git a/api/controllers/console/app/statistic.py b/api/controllers/console/app/statistic.py index 4806b02b558df8..b18c127f6279d4 100644 --- a/api/controllers/console/app/statistic.py +++ b/api/controllers/console/app/statistic.py @@ -30,7 +30,8 @@ def get(self, app_model): args = parser.parse_args() sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(*) AS message_count + SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + count(*) AS message_count FROM messages where app_id = :app_id """ arg_dict = {"tz": account.timezone, "app_id": app_model.id} @@ -84,7 +85,8 @@ def get(self, app_model): args = parser.parse_args() sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.conversation_id) AS conversation_count + SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + count(distinct messages.conversation_id) AS conversation_count FROM messages where app_id = :app_id """ arg_dict = {"tz": account.timezone, "app_id": app_model.id} @@ -138,7 +140,8 @@ def get(self, app_model): args = parser.parse_args() sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.from_end_user_id) AS terminal_count + SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + count(distinct messages.from_end_user_id) AS terminal_count FROM messages where app_id = :app_id """ arg_dict = {"tz": account.timezone, "app_id": app_model.id} diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index 942271a634c21a..608e1a33e710ea 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -91,7 +91,8 @@ def get(self, app_model): args = parser.parse_args() sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count + SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + count(distinct workflow_runs.created_by) AS terminal_count FROM workflow_runs WHERE app_id = :app_id AND triggered_from = :triggered_from diff --git a/api/controllers/console/wraps.py b/api/controllers/console/wraps.py index 7667b30e348a98..46223d104fe03f 100644 --- a/api/controllers/console/wraps.py +++ b/api/controllers/console/wraps.py @@ -64,7 +64,8 @@ def decorated(*args, **kwargs): elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size: abort(403, "The capacity of the vector space has reached the limit of your subscription.") elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size: - # The api of file upload is used in the multiple places, so we need to check the source of the request from datasets + # The api of file upload 
is used in the multiple places, + # so we need to check the source of the request from datasets source = request.args.get("source") if source == "datasets": abort(403, "The number of documents has reached the limit of your subscription.") diff --git a/api/controllers/web/wraps.py b/api/controllers/web/wraps.py index 93dc691d623949..c327c3df18526c 100644 --- a/api/controllers/web/wraps.py +++ b/api/controllers/web/wraps.py @@ -80,7 +80,8 @@ def _validate_web_sso_token(decoded, system_features, app_code): if not source or source != "sso": raise WebSSOAuthRequiredError() - # Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login + # Check if SSO is not enforced for web, and if the token source is SSO, + # raise an error and redirect to normal passport login if not system_features.sso_enforced_for_web or not app_web_sso_enabled: source = decoded.get("token_source") if source and source == "sso": diff --git a/api/core/agent/prompt/template.py b/api/core/agent/prompt/template.py index cb98f5501ddd08..ef64fd29fc3a76 100644 --- a/api/core/agent/prompt/template.py +++ b/api/core/agent/prompt/template.py @@ -41,7 +41,8 @@ {{historic_messages}} Question: {{query}} {{agent_scratchpad}} -Thought:""" +Thought:""" # noqa: E501 + ENGLISH_REACT_COMPLETION_AGENT_SCRATCHPAD_TEMPLATES = """Observation: {{observation}} Thought:""" @@ -86,7 +87,8 @@ ``` Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. -""" +""" # noqa: E501 + ENGLISH_REACT_CHAT_AGENT_SCRATCHPAD_TEMPLATES = "" diff --git a/api/core/app/apps/workflow_logging_callback.py b/api/core/app/apps/workflow_logging_callback.py index cdd21bf7c2e94d..388cb831802277 100644 --- a/api/core/app/apps/workflow_logging_callback.py +++ b/api/core/app/apps/workflow_logging_callback.py @@ -84,10 +84,12 @@ def on_workflow_node_execute_succeeded(self, event: NodeRunSucceededEvent) -> No if route_node_state.node_run_result: node_run_result = route_node_state.node_run_result self.print_text( - f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="green" + f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", + color="green", ) self.print_text( - f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + f"Process Data: " + f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", color="green", ) self.print_text( @@ -114,14 +116,17 @@ def on_workflow_node_execute_failed(self, event: NodeRunFailedEvent) -> None: node_run_result = route_node_state.node_run_result self.print_text(f"Error: {node_run_result.error}", color="red") self.print_text( - f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="red" + f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", + color="red", ) self.print_text( - f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + f"Process Data: " + f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", color="red", ) self.print_text( - f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", color="red" + f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if 
node_run_result.outputs else ''}", + color="red", ) def on_node_text_chunk(self, event: NodeRunStreamChunkEvent) -> None: diff --git a/api/core/file/message_file_parser.py b/api/core/file/message_file_parser.py index 8feaabedbbdf45..83059b216ee66f 100644 --- a/api/core/file/message_file_parser.py +++ b/api/core/file/message_file_parser.py @@ -188,7 +188,8 @@ def _to_file_obj(self, file: Union[dict, MessageFile], file_extra_config: FileEx def _check_image_remote_url(self, url): try: headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } def is_s3_presigned_url(url): diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 7ee6e6381755f2..4932284540738a 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -89,7 +89,8 @@ def execute_code(cls, language: CodeLanguage, preload: str, code: str) -> str: raise CodeExecutionError("Code execution service is unavailable") elif response.status_code != 200: raise Exception( - f"Failed to execute code, got status code {response.status_code}, please check if the sandbox service is running" + f"Failed to execute code, got status code {response.status_code}," + f" please check if the sandbox service is running" ) except CodeExecutionError as e: raise e diff --git a/api/core/helper/tool_parameter_cache.py b/api/core/helper/tool_parameter_cache.py index 4c3b736186383d..e848b46c5633ab 100644 --- a/api/core/helper/tool_parameter_cache.py +++ b/api/core/helper/tool_parameter_cache.py @@ -14,7 +14,10 @@ class ToolParameterCache: def __init__( self, tenant_id: str, provider: str, tool_name: str, cache_type: ToolParameterCacheType, identity_id: str ): - self.cache_key = f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}:identity_id:{identity_id}" + self.cache_key = ( + f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}" + f":identity_id:{identity_id}" + ) def get(self) -> Optional[dict]: """ diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index 7ab257872f54c6..c40b6d180804cf 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -59,24 +59,27 @@ } User Input: -""" +""" # noqa: E501 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = ( "Please help me predict the three most likely questions that human would ask, " "and keeping each question under 20 characters.\n" - "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n" + "MAKE SURE your output is the SAME language as the Assistant's latest response" + "(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n" "The output must be an array in JSON format following the specified schema:\n" '["question1","question2","question3"]\n' ) GENERATOR_QA_PROMPT = ( - " The user will send a long text. Generate a Question and Answer pairs only using the knowledge in the long text. Please think step by step." + " The user will send a long text. Generate a Question and Answer pairs only using the knowledge" + " in the long text. Please think step by step." 
"Step 1: Understand and summarize the main content of this text.\n" "Step 2: What key information or concepts are mentioned in this text?\n" "Step 3: Decompose or combine multiple pieces of information and concepts.\n" "Step 4: Generate questions and answers based on these key information and concepts.\n" " The questions should be clear and detailed, and the answers should be detailed and complete. " - "You must answer in {language}, in a style that is clear and detailed in {language}. No language other than {language} should be used. \n" + "You must answer in {language}, in a style that is clear and detailed in {language}." + " No language other than {language} should be used. \n" " Use the following format: Q1:\nA1:\nQ2:\nA2:...\n" "" ) @@ -94,7 +97,7 @@ - Use the same language as task description. - Output in ``` xml ``` and start with Please generate the full prompt template with at least 300 words and output only the prompt template. -""" +""" # noqa: E501 RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """ Here is a task description for which I would like you to create a high-quality prompt template for: @@ -109,7 +112,7 @@ - Use the same language as task description. - Output in ``` xml ``` and start with Please generate the full prompt template and output only the prompt template. -""" +""" # noqa: E501 RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """ I need to extract the following information from the input text. The tag specifies the 'type', 'description' and 'required' of the information to be extracted. @@ -134,7 +137,7 @@ ### Answer I should always output a valid list. Output nothing other than the list of variable_name. Output an empty list if there is no variable name in input text. -""" +""" # noqa: E501 RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """ @@ -150,4 +153,4 @@ Here is the task description: {{INPUT_TEXT}} You just need to generate the output -""" +""" # noqa: E501 diff --git a/api/core/model_runtime/entities/defaults.py b/api/core/model_runtime/entities/defaults.py index e94be6f9181cd1..4d0c9aa08f7337 100644 --- a/api/core/model_runtime/entities/defaults.py +++ b/api/core/model_runtime/entities/defaults.py @@ -8,8 +8,11 @@ }, "type": "float", "help": { - "en_US": "Controls randomness. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions.", - "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。较高的温度会导致更多的随机完成。", + "en_US": "Controls randomness. Lower temperature results in less random completions." + " As the temperature approaches zero, the model will become deterministic and repetitive." + " Higher temperature results in more random completions.", + "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。" + "较高的温度会导致更多的随机完成。", }, "required": False, "default": 0.0, @@ -24,7 +27,8 @@ }, "type": "float", "help": { - "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.", + "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options" + " are considered.", "zh_Hans": "通过核心采样控制多样性:0.5表示考虑了一半的所有可能性加权选项。", }, "required": False, @@ -88,7 +92,8 @@ }, "type": "int", "help": { - "en_US": "Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.", + "en_US": "Specifies the upper limit on the length of generated results." 
+ " If the generated results are truncated, you can increase this parameter.", "zh_Hans": "指定生成结果长度的上限。如果生成结果截断,可以调大该参数。", }, "required": False, @@ -104,7 +109,8 @@ }, "type": "string", "help": { - "en_US": "Set a response format, ensure the output from llm is a valid code block as possible, such as JSON, XML, etc.", + "en_US": "Set a response format, ensure the output from llm is a valid code block as possible," + " such as JSON, XML, etc.", "zh_Hans": "设置一个返回格式,确保llm的输出尽可能是有效的代码块,如JSON、XML等", }, "required": False, diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py index 09d2d7e54da9e1..d19ee8389d6ed5 100644 --- a/api/core/model_runtime/model_providers/__base/ai_model.py +++ b/api/core/model_runtime/model_providers/__base/ai_model.py @@ -72,7 +72,8 @@ def _transform_invoke_error(self, error: Exception) -> InvokeError: if isinstance(error, tuple(model_errors)): if invoke_error == InvokeAuthorizationError: return invoke_error( - description=f"[{provider_name}] Incorrect model credentials provided, please check and try again. " + description=f"[{provider_name}] Incorrect model credentials provided," + f" please check and try again. " ) return invoke_error(description=f"[{provider_name}] {invoke_error.description}, {str(error)}") diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py index 5c39186e6504e6..e8789ec7df1825 100644 --- a/api/core/model_runtime/model_providers/__base/large_language_model.py +++ b/api/core/model_runtime/model_providers/__base/large_language_model.py @@ -187,7 +187,7 @@ def _code_block_mode_wrapper( {{instructions}} -""" +""" # noqa: E501 code_block = model_parameters.get("response_format", "") if not code_block: @@ -830,7 +830,8 @@ def _validate_and_filter_model_parameters(self, model: str, model_parameters: di else: if parameter_value != round(parameter_value, parameter_rule.precision): raise ValueError( - f"Model Parameter {parameter_name} should be round to {parameter_rule.precision} decimal places." + f"Model Parameter {parameter_name} should be round to {parameter_rule.precision}" + f" decimal places." ) # validate parameter value range diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py index 30e9d2e9f2e0d1..0cb66842e7e261 100644 --- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py +++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py @@ -51,7 +51,7 @@ {{instructions}} -""" +""" # noqa: E501 class AnthropicLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index c2744691c3ed6e..eec9ebe067221f 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -16,6 +16,15 @@ AZURE_OPENAI_API_VERSION = "2024-02-15-preview" +DEFAULT_PARAM_SEED_HELP = I18nObject( + zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性," + "您应该参考 system_fingerprint 响应参数来监视变化。", + en_US="If specified, model will make a best effort to sample deterministically," + " such that repeated requests with the same seed and parameters should return the same result." 
+ " Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter" + " to monitor changes in the backend.", +) + def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule: rule = ParameterRule( @@ -229,10 +238,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -297,10 +303,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -365,10 +368,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -433,10 +433,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -502,10 +499,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -571,10 +565,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -650,10 +641,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -719,10 +707,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -788,10 +773,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -867,10 +849,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -936,10 +915,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -1000,10 +976,7 @@ class AzureBaseModel(BaseModel): name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py index bea6777f833a49..a7ca28d49d636e 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py +++ b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py @@ -15,6 +15,7 @@ def count_english_vocabularies(cls, text: str) -> int: @classmethod def _get_num_tokens(cls, text: str) -> int: - # tokens = number of Chinese characters + number of English words * 1.3 (for estimation only, subject to actual return) + # tokens = number of Chinese characters + number of English words * 1.3 + # (for estimation only, subject to actual return) # https://platform.baichuan-ai.com/docs/text-Embedding return int(cls.count_chinese_characters(text) + cls.count_english_vocabularies(text) * 1.3) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py index e07f2a419a00e3..239ae52b4cd50f 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py @@ -52,7 +52,7 @@ {{instructions}} -""" +""" # noqa: E501 class BedrockLargeLanguageModel(LargeLanguageModel): @@ -541,7 +541,9 @@ def validate_credentials(self, model: str, credentials: dict) -> None: "max_tokens": 32, } elif "ai21" in model: - # ValidationException: Malformed input request: #/temperature: expected type: Number, found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, please reformat your input and try again. + # ValidationException: Malformed input request: #/temperature: expected type: Number, + # found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, + # please reformat your input and try again. 
required_params = { "temperature": 0.7, "topP": 0.9, diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py index 274ff02095b105..53930d696fc312 100644 --- a/api/core/model_runtime/model_providers/google/llm/llm.py +++ b/api/core/model_runtime/model_providers/google/llm/llm.py @@ -45,7 +45,7 @@ {{instructions}} -""" +""" # noqa: E501 class GoogleLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py index 56c51e8888636c..288637495f6c87 100644 --- a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py +++ b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py @@ -54,7 +54,8 @@ def _get_tei_extra_parameter(server_url: str) -> TeiModelExtraParameter: url = str(URL(server_url) / "info") - # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3 + # this method is surrounded by a lock, and default requests may hang forever, + # so we just set a Adapter with max_retries=3 session = Session() session.mount("http://", HTTPAdapter(max_retries=3)) session.mount("https://", HTTPAdapter(max_retries=3)) diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py index c056ab7a0855c7..b57e5e1c2b2ed8 100644 --- a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py +++ b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py @@ -131,7 +131,8 @@ def _convert_prompt_messages_to_dicts(self, prompt_messages: list[PromptMessage] { "Role": message.role.value, # fix set content = "" while tool_call request - # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter message:Messages Content and Contents not allowed empty at the same time. + # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter + # message:Messages Content and Contents not allowed empty at the same time. "Content": " ", # message.content if (message.content is not None) else "", "ToolCalls": dict_tool_calls, } diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llm.py b/api/core/model_runtime/model_providers/nvidia/llm/llm.py index 4d3747dc842ea4..1c98c6be6ca72d 100644 --- a/api/core/model_runtime/model_providers/nvidia/llm/llm.py +++ b/api/core/model_runtime/model_providers/nvidia/llm/llm.py @@ -93,7 +93,8 @@ def _add_custom_parameters(self, credentials: dict, model: str) -> None: def _validate_credentials(self, model: str, credentials: dict) -> None: """ - Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard. + Validate model credentials using requests to ensure compatibility with all providers following + OpenAI's API standard. 
:param model: model name :param credentials: model credentials diff --git a/api/core/model_runtime/model_providers/oci/llm/llm.py b/api/core/model_runtime/model_providers/oci/llm/llm.py index ad5197a154c282..10c8d907c1e77c 100644 --- a/api/core/model_runtime/model_providers/oci/llm/llm.py +++ b/api/core/model_runtime/model_providers/oci/llm/llm.py @@ -239,7 +239,8 @@ def _generate( config_items = oci_config_content.split("/") if len(config_items) != 5: raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" + "oci_config_content should be base64.b64encode(" + "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" ) oci_config["user"] = config_items[0] oci_config["fingerprint"] = config_items[1] diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py index df77db47d9fb53..80ad2be9f56872 100644 --- a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py @@ -146,7 +146,8 @@ def _embedding_invoke(self, model: str, credentials: dict, texts: list[str]) -> config_items = oci_config_content.split("/") if len(config_items) != 5: raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" + "oci_config_content should be base64.b64encode(" + "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" ) oci_config["user"] = config_items[0] oci_config["fingerprint"] = config_items[1] diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index 160eea0148702b..3f32f454e499d7 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -639,9 +639,10 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode type=ParameterType.STRING, help=I18nObject( en_US="Sets how long the model is kept in memory after generating a response. " - "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours). " - "A negative number keeps the model loaded indefinitely, and '0' unloads the model immediately after generating a response. " - "Valid time units are 's','m','h'. (Default: 5m)" + "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours)." + " A negative number keeps the model loaded indefinitely, and '0' unloads the model" + " immediately after generating a response." + " Valid time units are 's','m','h'. 
(Default: 5m)" ), ), ParameterRule( diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index 5950b77a96a015..578687b5d3a819 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -37,7 +37,7 @@ {{instructions}} -""" +""" # noqa: E501 class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py index 24317b488c2ded..41ca163a92ff84 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py @@ -103,7 +103,8 @@ def get_num_tokens( def validate_credentials(self, model: str, credentials: dict) -> None: """ - Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard. + Validate model credentials using requests to ensure compatibility with all providers following + OpenAI's API standard. :param model: model name :param credentials: model credentials @@ -262,7 +263,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode return entity - # validate_credentials method has been rewritten to use the requests library for compatibility with all providers following OpenAI's API standard. + # validate_credentials method has been rewritten to use the requests library for compatibility with all providers + # following OpenAI's API standard. def _generate( self, model: str, diff --git a/api/core/model_runtime/model_providers/spark/llm/_client.py b/api/core/model_runtime/model_providers/spark/llm/_client.py index 25223e83405109..b99a657e715edb 100644 --- a/api/core/model_runtime/model_providers/spark/llm/_client.py +++ b/api/core/model_runtime/model_providers/spark/llm/_client.py @@ -61,7 +61,10 @@ def create_url(self, host: str, path: str, api_base: str, api_key: str, api_secr signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8") - authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"' + authorization_origin = ( + f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line",' + f' signature="{signature_sha_base64}"' + ) authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8") diff --git a/api/core/model_runtime/model_providers/upstage/llm/llm.py b/api/core/model_runtime/model_providers/upstage/llm/llm.py index 1014b53f39ef62..9646e209b26364 100644 --- a/api/core/model_runtime/model_providers/upstage/llm/llm.py +++ b/api/core/model_runtime/model_providers/upstage/llm/llm.py @@ -34,7 +34,7 @@ {{instructions}} -""" +""" # noqa: E501 class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py index 09a7f53f2816d1..afaec46fb55342 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py @@ -114,7 +114,8 @@ def _generate_anthropic( credentials.refresh(request) token = credentials.token - # Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available in us-central1 region + # Vertex AI 
Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available + # in us-central1 region if "opus" in model or "claude-3-5-sonnet" in model: location = "us-east5" else: @@ -123,7 +124,8 @@ def _generate_anthropic( # use access token to authenticate if token: client = AnthropicVertex(region=location, project_id=project_id, access_token=token) - # When access token is empty, try to use the Google Cloud VM's built-in service account or the GOOGLE_APPLICATION_CREDENTIALS environment variable + # When access token is empty, try to use the Google Cloud VM's built-in service account + # or the GOOGLE_APPLICATION_CREDENTIALS environment variable else: client = AnthropicVertex( region=location, diff --git a/api/core/model_runtime/model_providers/wenxin/llm/llm.py b/api/core/model_runtime/model_providers/wenxin/llm/llm.py index 1ff0ac7ad27ce9..8feedbfe55f08e 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/llm.py +++ b/api/core/model_runtime/model_providers/wenxin/llm/llm.py @@ -28,7 +28,7 @@ You should also complete the text started with ``` but not tell ``` directly. -""" +""" # noqa: E501 class ErnieBotLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index b2c837dee100ee..429a2c52406a82 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -130,7 +130,8 @@ def validate_credentials(self, model: str, credentials: dict) -> None: credentials["completion_type"] = "completion" else: raise ValueError( - f"xinference model ability {extra_param.model_ability} is not supported, check if you have the right model type" + f"xinference model ability {extra_param.model_ability} is not supported," + f" check if you have the right model type" ) if extra_param.support_function_call: @@ -362,7 +363,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode help=I18nObject( en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they " "appear in the text so far, increasing the model's likelihood to talk about new topics.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚,从而增加模型谈论新话题的可能性。", + zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚," + "从而增加模型谈论新话题的可能性。", ), default=0.0, min=-2.0, @@ -382,7 +384,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on their " "existing frequency in the text so far, decreasing the model's likelihood to repeat the " "same line verbatim.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚,从而降低模型逐字重复相同内容的可能性。", + zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚," + "从而降低模型逐字重复相同内容的可能性。", ), default=0.0, min=-2.0, diff --git a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py index 54c8b51654bfbc..18efde758c5b54 100644 --- a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py +++ b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py @@ -101,12 +101,16 @@ def _speech2text_invoke( :param model: model name :param credentials: model credentials - :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpe g,mpga, m4a, ogg, wav, or webm. 
+ :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, + mpga, m4a, ogg, wav, or webm. :param language: The language of the input audio. Supplying the input language in ISO-639-1 :param prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. - :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose _json, or vtt. - :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output mor e random,while lower values like 0.2 will make it more focused and deterministic.If set to 0, the model wi ll use log probability to automatically increase the temperature until certain thresholds are hit. + :param response_format: The format of the transcript output, in one of these options: json, text, srt, + verbose_json, or vtt. + :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + random,while lower values like 0.2 will make it more focused and deterministic.If set to 0, the model will use + log probability to automatically increase the temperature until certain thresholds are hit. :return: text for given audio file """ server_url = credentials["server_url"] diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py index 6ad10e690ddc17..1e05da9c56b27c 100644 --- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py +++ b/api/core/model_runtime/model_providers/xinference/xinference_helper.py @@ -76,7 +76,8 @@ def _get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: st url = str(URL(server_url) / "v1" / "models" / model_uid) - # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3 + # this method is surrounded by a lock, and default requests may hang forever, + # so we just set a Adapter with max_retries=3 session = Session() session.mount("http://", HTTPAdapter(max_retries=3)) session.mount("https://", HTTPAdapter(max_retries=3)) @@ -88,7 +89,8 @@ def _get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: st raise RuntimeError(f"get xinference model extra parameter failed, url: {url}, error: {e}") if response.status_code != 200: raise RuntimeError( - f"get xinference model extra parameter failed, status code: {response.status_code}, response: {response.text}" + f"get xinference model extra parameter failed, status code: {response.status_code}," + f" response: {response.text}" ) response_json = response.json() diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py index 484ac088dbc0c5..29a40c08690e47 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py +++ b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py @@ -31,7 +31,7 @@ {{instructions}} -```JSON""" +```JSON""" # noqa: E501 class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py index b7cf6bb7fd06c4..7a91f9b79627c4 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py @@ -75,7 +75,8 @@ def __bool__(self) -> Literal[False]: ResponseT = TypeVar( "ResponseT", - bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]", + bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol," + " BinaryResponseContent]", ) # for user input files diff --git a/api/core/model_runtime/schema_validators/common_validator.py b/api/core/model_runtime/schema_validators/common_validator.py index e4f35414756400..c05edb72e3552e 100644 --- a/api/core/model_runtime/schema_validators/common_validator.py +++ b/api/core/model_runtime/schema_validators/common_validator.py @@ -67,7 +67,8 @@ def _validate_credential_form_schema( if credential_form_schema.max_length: if len(value) > credential_form_schema.max_length: raise ValueError( - f"Variable {credential_form_schema.variable} length should not greater than {credential_form_schema.max_length}" + f"Variable {credential_form_schema.variable} length should not" + f" greater than {credential_form_schema.max_length}" ) # check the type of value diff --git a/api/core/prompt/prompt_templates/advanced_prompt_templates.py b/api/core/prompt/prompt_templates/advanced_prompt_templates.py index e4b3a61cb4c001..0ab7f526cc4fee 100644 --- a/api/core/prompt/prompt_templates/advanced_prompt_templates.py +++ b/api/core/prompt/prompt_templates/advanced_prompt_templates.py @@ -1,11 +1,11 @@ -CONTEXT = "Use the following context as your learned knowledge, inside XML tags.\n\n\n{{#context#}}\n\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" +CONTEXT = "Use the following context as your learned knowledge, inside XML tags.\n\n\n{{#context#}}\n\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" # noqa: E501 -BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" +BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" # noqa: E501 CHAT_APP_COMPLETION_PROMPT_CONFIG = { "completion_prompt_config": { "prompt": { - "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside XML tags.\n\n\n{{#histories#}}\n\n\n\nHuman: {{#query#}}\n\nAssistant: " + "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside XML tags.\n\n\n{{#histories#}}\n\n\n\nHuman: {{#query#}}\n\nAssistant: " # noqa: E501 }, "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"}, }, @@ -24,7 +24,7 @@ BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG = { "completion_prompt_config": { "prompt": { - "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" + "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" # noqa: E501 }, "conversation_histories_role": {"user_prefix": "用户", "assistant_prefix": "助手"}, }, diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py 
b/api/core/rag/datasource/vdb/oracle/oraclevector.py index 06c20ceb5fdd5a..d223b0decf3d24 100644 --- a/api/core/rag/datasource/vdb/oracle/oraclevector.py +++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py @@ -195,7 +195,8 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc top_k = kwargs.get("top_k", 5) with self._get_cursor() as cur: cur.execute( - f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name} ORDER BY distance fetch first {top_k} rows only", + f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name}" + f" ORDER BY distance fetch first {top_k} rows only", [numpy.array(query_vector)], ) docs = [] @@ -254,7 +255,8 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: entities.append(token) with self._get_cursor() as cur: cur.execute( - f"select meta, text, embedding FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", + f"select meta, text, embedding FROM {self.table_name}" + f" WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", [" ACCUM ".join(entities)], ) docs = [] diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index 38dfd24b5626e5..d2d9e5238b607b 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -139,7 +139,8 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc with self._get_cursor() as cur: cur.execute( - f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name} ORDER BY distance LIMIT {top_k}", + f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name}" + f" ORDER BY distance LIMIT {top_k}", (json.dumps(query_vector),), ) docs = [] diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index a00b3cba533f6d..244ef9614a48a7 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -30,7 +30,10 @@ from models.model import UploadFile SUPPORT_URL_CONTENT_TYPES = ["application/pdf", "text/plain", "application/json"] -USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" +USER_AGENT = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124" + " Safari/537.36" +) class ExtractProcessor: diff --git a/api/core/rag/retrieval/router/multi_dataset_react_route.py b/api/core/rag/retrieval/router/multi_dataset_react_route.py index 33841cac06adbf..a0494adc60e863 100644 --- a/api/core/rag/retrieval/router/multi_dataset_react_route.py +++ b/api/core/rag/retrieval/router/multi_dataset_react_route.py @@ -14,7 +14,7 @@ PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. -Thought:""" +Thought:""" # noqa: E501 FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English. 
@@ -46,7 +46,7 @@ "action": "Final Answer", "action_input": "Final response to human" }} -```""" +```""" # noqa: E501 class ReactMultiDatasetRouter: @@ -204,7 +204,8 @@ def create_chat_prompt( tool_strings = [] for tool in tools: tool_strings.append( - f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query', 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}" + f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query'," + f" 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}" ) formatted_tools = "\n".join(tool_strings) unique_tool_names = {tool.name for tool in tools} @@ -236,7 +237,7 @@ def create_completion_prompt( suffix = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Question: {input} Thought: {agent_scratchpad} -""" +""" # noqa: E501 tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) tool_names = ", ".join([tool.name for tool in tools]) diff --git a/api/core/tools/entities/tool_bundle.py b/api/core/tools/entities/tool_bundle.py index da6201c5aaecd2..0c15b2a3711f11 100644 --- a/api/core/tools/entities/tool_bundle.py +++ b/api/core/tools/entities/tool_bundle.py @@ -7,7 +7,8 @@ class ApiToolBundle(BaseModel): """ - This class is used to store the schema information of an api based tool. such as the url, the method, the parameters, etc. + This class is used to store the schema information of an api based tool. + such as the url, the method, the parameters, etc. """ # server_url diff --git a/api/core/tools/entities/values.py b/api/core/tools/entities/values.py index f9db190f91e682..f460df7e25c916 100644 --- a/api/core/tools/entities/values.py +++ b/api/core/tools/entities/values.py @@ -4,52 +4,52 @@ ICONS = { ToolLabelEnum.SEARCH: """ -""", +""", # noqa: E501 ToolLabelEnum.IMAGE: """ -""", +""", # noqa: E501 ToolLabelEnum.VIDEOS: """ -""", +""", # noqa: E501 ToolLabelEnum.WEATHER: """ -""", +""", # noqa: E501 ToolLabelEnum.FINANCE: """ -""", +""", # noqa: E501 ToolLabelEnum.DESIGN: """ -""", +""", # noqa: E501 ToolLabelEnum.TRAVEL: """ -""", +""", # noqa: E501 ToolLabelEnum.SOCIAL: """ -""", +""", # noqa: E501 ToolLabelEnum.NEWS: """ -""", +""", # noqa: E501 ToolLabelEnum.MEDICAL: """ -""", +""", # noqa: E501 ToolLabelEnum.PRODUCTIVITY: """ -""", +""", # noqa: E501 ToolLabelEnum.EDUCATION: """ -""", +""", # noqa: E501 ToolLabelEnum.BUSINESS: """ -""", +""", # noqa: E501 ToolLabelEnum.ENTERTAINMENT: """ -""", +""", # noqa: E501 ToolLabelEnum.UTILITIES: """ -""", +""", # noqa: E501 ToolLabelEnum.OTHER: """ -""", +""", # noqa: E501 } default_tool_label_dict = { diff --git a/api/core/tools/provider/builtin/aippt/tools/aippt.py b/api/core/tools/provider/builtin/aippt/tools/aippt.py index 7cee8f9f799b83..a2d69fbcd1a5ed 100644 --- a/api/core/tools/provider/builtin/aippt/tools/aippt.py +++ b/api/core/tools/provider/builtin/aippt/tools/aippt.py @@ -46,7 +46,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe tool_parameters (dict[str, Any]): The parameters for the tool Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. 
""" title = tool_parameters.get("title", "") if not title: diff --git a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py index 98d82c233e0714..2d65ba2d6f4389 100644 --- a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py +++ b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py @@ -104,7 +104,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe tool_parameters (dict[str, Any]): The parameters for the tool, including the 'query' parameter. Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. """ query = tool_parameters.get("query", "") diff --git a/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py b/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py index d6a65b1708281c..a04f5c0fe9f1af 100644 --- a/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py +++ b/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py @@ -62,7 +62,8 @@ def _invoke( if isinstance(policy_data, dict) and "topics" in policy_data: for topic in policy_data["topics"]: formatted_assessments.append( - f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}, Action: {topic['action']}" + f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}," + f" Action: {topic['action']}" ) else: formatted_assessments.append(f"Policy: {policy_type}, Data: {policy_data}") diff --git a/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py b/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py index e1effd066c611a..57cf6d7a308dba 100644 --- a/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py +++ b/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py @@ -24,7 +24,8 @@ def _invoke( tool_parameters (dict[str, Any]): The parameters for the tool, including 'doc' and 'topic'. Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. 
""" doc = tool_parameters.get("doc", "") topic = tool_parameters.get("topic", "") diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py index 7606eee7af6cfb..1e77f3c6dfc678 100644 --- a/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py @@ -70,7 +70,10 @@ def fetch_files( ) else: # It's a file if is_repository: - file_url = f"{domain}/api/v4/projects/{encoded_identifier}/repository/files/{item_path}/raw?ref={branch}" + file_url = ( + f"{domain}/api/v4/projects/{encoded_identifier}/repository/files" + f"/{item_path}/raw?ref={branch}" + ) else: file_url = ( f"{domain}/api/v4/projects/{project_id}/repository/files/{item_path}/raw?ref={branch}" diff --git a/api/core/tools/provider/builtin/google_translate/tools/translate.py b/api/core/tools/provider/builtin/google_translate/tools/translate.py index 5d57b5fabfb94f..ea3f2077d5d485 100644 --- a/api/core/tools/provider/builtin/google_translate/tools/translate.py +++ b/api/core/tools/provider/builtin/google_translate/tools/translate.py @@ -35,7 +35,8 @@ def _translate(self, content: str, dest: str) -> str: params = {"client": "gtx", "sl": "auto", "tl": dest, "dt": "t", "q": content} headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } response_json = requests.get(url, params=params, headers=headers).json() diff --git a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py index 69cf8aa740b16b..9e0918afa9bd93 100644 --- a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py +++ b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py @@ -114,7 +114,8 @@ def get_controls(self, controls: list) -> dict: } fields.append(field) fields_list.append( - f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}|{field['options'] if field['options'] else ''}|" + f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}" + f"|{field['options'] if field['options'] else ''}|" ) fields.append( diff --git a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py index 592fa230cf4d6f..928694a41f56cf 100644 --- a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py +++ b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py @@ -112,7 +112,10 @@ def _invoke( else: result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"." 
if result["total"] > 0: - result_text += f" The following are {result['total'] if result['total'] < limit else limit} pieces of data presented in a table format:\n\n{table_header}" + result_text += ( + f" The following are {result['total'] if result['total'] < limit else limit}" + f" pieces of data presented in a table format:\n\n{table_header}" + ) for row in rows: result_values = [] for f in fields: diff --git a/api/core/tools/provider/builtin/searchapi/tools/google.py b/api/core/tools/provider/builtin/searchapi/tools/google.py index d632304a46bba4..5648c62b49ca35 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google.py @@ -64,7 +64,10 @@ def _process_response(res: dict, type: str) -> str: elif type == "link": if "answer_box" in res.keys() and "organic_result" in res["answer_box"].keys(): if "title" in res["answer_box"]["organic_result"].keys(): - toret = f"[{res['answer_box']['organic_result']['title']}]({res['answer_box']['organic_result']['link']})\n" + toret = ( + f"[{res['answer_box']['organic_result']['title']}]" + f"({res['answer_box']['organic_result']['link']})\n" + ) elif "organic_results" in res.keys() and "link" in res["organic_results"][0].keys(): toret = "" for item in res["organic_results"]: diff --git a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py index 46137886bda434..e9464ef450c0a8 100644 --- a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py +++ b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py @@ -311,7 +311,8 @@ def get_runtime_parameters(self) -> list[ToolParameter]: ), type=ToolParameter.ToolParameterType.STRING, form=ToolParameter.ToolParameterForm.LLM, - llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words as possible as detailed, the prompt must be written in English.", + llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate" + " as a list of words as possible as detailed, the prompt must be written in English.", required=True, ), ] @@ -321,12 +322,14 @@ def get_runtime_parameters(self) -> list[ToolParameter]: name="image_id", label=I18nObject(en_US="image_id", zh_Hans="image_id"), human_description=I18nObject( - en_US="Image id of the image you want to generate based on, if you want to generate image based on the default image, you can leave this field empty.", + en_US="Image id of the image you want to generate based on, if you want to generate image based" + " on the default image, you can leave this field empty.", zh_Hans="您想要生成的图像的图像 ID,如果您想要基于默认图像生成图像,则可以将此字段留空。", ), type=ToolParameter.ToolParameterType.STRING, form=ToolParameter.ToolParameterForm.LLM, - llm_description="Image id of the original image, you can leave this field empty if you want to generate a new image.", + llm_description="Image id of the original image, you can leave this field empty if you want to" + " generate a new image.", required=True, options=[ ToolParameterOption(value=i.name, label=I18nObject(en_US=i.name, zh_Hans=i.name)) @@ -344,12 +347,14 @@ def get_runtime_parameters(self) -> list[ToolParameter]: name="model", label=I18nObject(en_US="Model", zh_Hans="Model"), human_description=I18nObject( - en_US="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", + en_US="Model of Stable Diffusion, you can 
check the official documentation" + " of Stable Diffusion", zh_Hans="Stable Diffusion 的模型,您可以查看 Stable Diffusion 的官方文档", ), type=ToolParameter.ToolParameterType.SELECT, form=ToolParameter.ToolParameterForm.FORM, - llm_description="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", + llm_description="Model of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", required=True, default=models[0], options=[ @@ -368,12 +373,14 @@ def get_runtime_parameters(self) -> list[ToolParameter]: name="sampler_name", label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"), human_description=I18nObject( - en_US="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", + en_US="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", zh_Hans="Stable Diffusion 的Sampling method,您可以查看 Stable Diffusion 的官方文档", ), type=ToolParameter.ToolParameterType.SELECT, form=ToolParameter.ToolParameterForm.FORM, - llm_description="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", + llm_description="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", required=True, default=sample_methods[0], options=[ diff --git a/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py b/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py index 26f12864c3942b..b32b0124dd31da 100644 --- a/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and list name. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID and list name. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py b/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py index dfc013a6b8bb73..e98efb81ca673e 100644 --- a/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool, Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including details for the new card. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including details for the new card. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/delete_board.py b/api/core/tools/provider/builtin/trello/tools/delete_board.py index 9dbd8f78d5e65f..7fc9d1f13c2664 100644 --- a/api/core/tools/provider/builtin/trello/tools/delete_board.py +++ b/api/core/tools/provider/builtin/trello/tools/delete_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. 
- tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/delete_card.py b/api/core/tools/provider/builtin/trello/tools/delete_card.py index 960c3055fe94a4..1de98d639ebb7d 100644 --- a/api/core/tools/provider/builtin/trello/tools/delete_card.py +++ b/api/core/tools/provider/builtin/trello/tools/delete_card.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the card ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the card ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_actions.py b/api/core/tools/provider/builtin/trello/tools/get_board_actions.py index 03510f196488b1..cabc7ce09359d5 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_actions.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_actions.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py b/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py index 5b41b128d07ca8..fe42cd9c5cbf86 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_cards.py b/api/core/tools/provider/builtin/trello/tools/get_board_cards.py index e3bed2e6e620bc..ff2b1221e767de 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_cards.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_cards.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. 
diff --git a/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py b/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py index 4d8854747c6963..3d7f9f4ad1c996 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py +++ b/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and filter. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID and filter. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py b/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py index ca8aa9c2d5daf4..ccf404068f225e 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool] Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/update_board.py b/api/core/tools/provider/builtin/trello/tools/update_board.py index 62681eea6b4a9f..1e358b00f49add 100644 --- a/api/core/tools/provider/builtin/trello/tools/update_board.py +++ b/api/core/tools/provider/builtin/trello/tools/update_board.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool, Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including board ID and updates. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including board ID and updates. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/update_card.py b/api/core/tools/provider/builtin/trello/tools/update_card.py index 26113f12290613..d25fcbafaa6326 100644 --- a/api/core/tools/provider/builtin/trello/tools/update_card.py +++ b/api/core/tools/provider/builtin/trello/tools/update_card.py @@ -17,7 +17,8 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Union[str, int, bool, Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including the card ID and updates. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including the card ID and updates. Returns: ToolInvokeMessage: The result of the tool invocation. 
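The docstring and message rewraps above, and the similar ones that follow, all rely on Python's implicit concatenation of adjacent string literals: a long literal split across parenthesized lines yields exactly the same runtime string as the original one-liner. A minimal sketch of the mechanism, with a hypothetical variable name and text that are not taken from the patch:

# Illustrative sketch, not part of the patch.
board_id = "abc123"  # hypothetical value, for demonstration only

single_line = f"The parameters for the tool invocation, including the board ID {board_id}."
wrapped = (
    f"The parameters for the tool invocation,"
    f" including the board ID {board_id}."
)
assert single_line == wrapped  # the rewrap is purely cosmetic

# Note that every fragment containing a {placeholder} needs its own f prefix;
# a plain "..." fragment may sit next to an f"..." fragment only if it holds
# no placeholders.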
diff --git a/api/core/tools/provider/builtin/twilio/tools/send_message.py b/api/core/tools/provider/builtin/twilio/tools/send_message.py index 822d0c0ebdd3a8..156249bc960c89 100644 --- a/api/core/tools/provider/builtin/twilio/tools/send_message.py +++ b/api/core/tools/provider/builtin/twilio/tools/send_message.py @@ -72,7 +72,8 @@ class SendMessageTool(BuiltinTool): tool_parameters (Dict[str, Any]): The parameters required for sending the message. Returns: - Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, which includes the status of the message sending operation. + Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, + which includes the status of the message sending operation. """ def _invoke( diff --git a/api/core/tools/provider/builtin/vectorizer/tools/test_data.py b/api/core/tools/provider/builtin/vectorizer/tools/test_data.py index 8e1b0977766706..8effa9818a0c40 100644 --- a/api/core/tools/provider/builtin/vectorizer/tools/test_data.py +++ b/api/core/tools/provider/builtin/vectorizer/tools/test_data.py @@ -1 +1 @@ -VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6F
XZubVBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" +VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZubVBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" # noqa: E501 diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index 9a6a49d8f443b4..645f0861fa06b3 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -193,7 +193,10 @@ def _convert_tool_response_to_str(tool_response: list[ToolInvokeMessage]) -> str response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or response.type == ToolInvokeMessage.MessageType.IMAGE ): - result += "image has been created and sent to user already, you do not need to create it, just tell the user to check it now." + result += ( + "image has been created and sent to user already, you do not need to create it," + " just tell the user to check it now." + ) elif response.type == ToolInvokeMessage.MessageType.JSON: result += f"tool response: {json.dumps(response.message, ensure_ascii=False)}." 
else: diff --git a/api/core/tools/utils/feishu_api_utils.py b/api/core/tools/utils/feishu_api_utils.py index 7bb026a383a616..44803d7d6531fa 100644 --- a/api/core/tools/utils/feishu_api_utils.py +++ b/api/core/tools/utils/feishu_api_utils.py @@ -89,7 +89,7 @@ def get_document_raw_content(self, document_id: str) -> dict: "content": "云文档\n多人实时协同,插入一切元素。不仅是在线文档,更是强大的创作和互动工具\n云文档:专为协作而生\n" } } - """ + """ # noqa: E501 params = { "document_id": document_id, } diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index c4983ebc65ea8d..03087f9e60d749 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -45,7 +45,7 @@ def transform_tool_invoke_messages( result.append( ToolInvokeMessage( type=ToolInvokeMessage.MessageType.TEXT, - message=f"Failed to download image: {message.message}, you can try to download it yourself.", + message=f"Failed to download image: {message.message}, please try to download it manually.", meta=message.meta.copy() if message.meta is not None else {}, save_as=message.save_as, ) diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py index 654c9acaf934b4..210b84b29a72f9 100644 --- a/api/core/tools/utils/parser.py +++ b/api/core/tools/utils/parser.py @@ -315,7 +315,8 @@ def auto_parse_to_tool_bundle( yaml_error = e if loaded_content is None: raise ToolApiSchemaError( - f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}, yaml error: {str(yaml_error)}" + f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}," + f" yaml error: {str(yaml_error)}" ) swagger_error = None @@ -355,5 +356,6 @@ def auto_parse_to_tool_bundle( openapi_plugin_error = e raise ToolApiSchemaError( - f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}, openapi plugin error: {str(openapi_plugin_error)}" + f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}," + f" openapi plugin error: {str(openapi_plugin_error)}" ) diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py index 3639b5fff79b7f..fc2f63a241b305 100644 --- a/api/core/tools/utils/web_reader_tool.py +++ b/api/core/tools/utils/web_reader_tool.py @@ -38,7 +38,8 @@ def page_result(text: str, cursor: int, max_length: int) -> str: def get_url(url: str, user_agent: str = None) -> str: """Fetch URL and return the contents as a string.""" headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } if user_agent: headers["User-Agent"] = user_agent diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index a07ba2f74038b0..73164fff9ae5a7 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -179,7 +179,8 @@ def _transform_result( ) else: raise ValueError( - f"Output {prefix}.{output_name} is not a valid array. make sure all elements are of the same type." + f"Output {prefix}.{output_name} is not a valid array." + f" make sure all elements are of the same type." 
) elif isinstance(output_value, type(None)): pass @@ -201,7 +202,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an object, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an object," + f" got {type(result.get(output_name))} instead." ) else: transformed_result[output_name] = self._transform_result( @@ -228,7 +230,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH: @@ -248,7 +251,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_STRING_ARRAY_LENGTH: @@ -268,7 +272,8 @@ def _transform_result( transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_OBJECT_ARRAY_LENGTH: @@ -283,7 +288,8 @@ def _transform_result( pass else: raise ValueError( - f"Output {prefix}{dot}{output_name}[{i}] is not an object, got {type(value)} instead at index {i}." + f"Output {prefix}{dot}{output_name}[{i}] is not an object," + f" got {type(value)} instead at index {i}." 
) transformed_result[output_name] = [ diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 53e8be64153d99..af55688a52c9b7 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -128,11 +128,12 @@ def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: weights = None elif node_data.multiple_retrieval_config.reranking_mode == "weighted_score": reranking_model = None + vector_setting = node_data.multiple_retrieval_config.weights.vector_setting weights = { "vector_setting": { - "vector_weight": node_data.multiple_retrieval_config.weights.vector_setting.vector_weight, - "embedding_provider_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_provider_name, - "embedding_model_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_model_name, + "vector_weight": vector_setting.vector_weight, + "embedding_provider_name": vector_setting.embedding_provider_name, + "embedding_model_name": vector_setting.embedding_model_name, }, "keyword_setting": { "keyword_weight": node_data.multiple_retrieval_config.weights.keyword_setting.keyword_weight diff --git a/api/core/workflow/nodes/parameter_extractor/prompts.py b/api/core/workflow/nodes/parameter_extractor/prompts.py index c63fded4d02573..58fcecc53b09fd 100644 --- a/api/core/workflow/nodes/parameter_extractor/prompts.py +++ b/api/core/workflow/nodes/parameter_extractor/prompts.py @@ -23,7 +23,7 @@ To illustrate, if the task involves extracting a user's name and their request, your function call might look like this: Ensure your output follows a similar structure to examples. ### Final Output Produce well-formatted function calls in json without XML tags, as shown in the example. -""" +""" # noqa: E501 FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information from context inside XML tags by calling the function {FUNCTION_CALLING_EXTRACTOR_NAME} with the correct parameters with structure inside XML tags. @@ -33,7 +33,7 @@ \x7bstructure\x7d -""" +""" # noqa: E501 FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [ { @@ -55,7 +55,8 @@ }, }, "assistant": { - "text": "I need always call the function with the correct parameters. in this case, I need to call the function with the location parameter.", + "text": "I need always call the function with the correct parameters." + " in this case, I need to call the function with the location parameter.", "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"location": "San Francisco"}}, }, }, @@ -72,7 +73,8 @@ }, }, "assistant": { - "text": "I need always call the function with the correct parameters. in this case, I need to call the function with the food parameter.", + "text": "I need always call the function with the correct parameters." + " in this case, I need to call the function with the food parameter.", "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"food": "apple pie"}}, }, }, @@ -117,7 +119,7 @@ ### Answer I should always output a valid JSON object. Output nothing other than the JSON object. ```JSON -""" +""" # noqa: E501 CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and output a valid JSON object. The structure of the JSON object you can found in the instructions. 
diff --git a/api/core/workflow/nodes/question_classifier/template_prompts.py b/api/core/workflow/nodes/question_classifier/template_prompts.py index 581f9869221632..ce32b01aa41897 100644 --- a/api/core/workflow/nodes/question_classifier/template_prompts.py +++ b/api/core/workflow/nodes/question_classifier/template_prompts.py @@ -12,13 +12,13 @@ {histories} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_USER_PROMPT_1 = """ { "input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], "categories": [{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"},{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"},{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"},{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}], "classification_instructions": ["classify the text based on the feedback provided by customer"]} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """ ```json @@ -32,7 +32,7 @@ {"input_text": ["bad service, slow to bring the food"], "categories": [{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"},{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"},{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}], "classification_instructions": []} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """ ```json @@ -73,4 +73,4 @@ ### User Input {{"input_text" : ["{input_text}"], "categories" : {categories},"classification_instruction" : ["{classification_instructions}"]}} ### Assistant Output -""" +""" # noqa: E501 diff --git a/api/libs/gmpy2_pkcs10aep_cipher.py b/api/libs/gmpy2_pkcs10aep_cipher.py index f89902c5e8de09..83f9c74e339e17 100644 --- a/api/libs/gmpy2_pkcs10aep_cipher.py +++ b/api/libs/gmpy2_pkcs10aep_cipher.py @@ -204,7 +204,8 @@ def decrypt(self, ciphertext): def new(key, hashAlgo=None, mgfunc=None, label=b"", randfunc=None): - """Return a cipher object :class:`PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption. + """Return a cipher object :class:`PKCS1OAEP_Cipher` + that can be used to perform PKCS#1 OAEP encryption or decryption. :param key: The key object to use to encrypt or decrypt the message. 
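The prompt-template files above (parameter_extractor/prompts.py and question_classifier/template_prompts.py), like the Feishu docstring and the test fixtures elsewhere in the patch, take the other route: the literal's value must stay exactly as written, so the line-too-long rule is suppressed for that single physical line with # noqa: E501 instead of re-wrapping. A small sketch of the trade-off; the template text is invented for illustration:

# Illustrative sketch, not part of the patch.
EXAMPLE_TEMPLATE = """Classify the input into one of the given categories and reply with a single JSON object containing category_id and category_name."""  # noqa: E501

# The suppression applies only to its own line; every other line in the file
# is still checked. Re-wrapping with implicit concatenation stays preferable
# whenever a break can fall on an existing space without changing the value.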
diff --git a/api/models/provider.py b/api/models/provider.py index ff63e03a92e735..644915e781084b 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -65,7 +65,10 @@ class Provider(db.Model): updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) def __repr__(self): - return f"" + return ( + f"" + ) @property def token_is_set(self): diff --git a/api/models/tools.py b/api/models/tools.py index 6b69a219b15184..861066a2d5c4ee 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -62,7 +62,8 @@ class PublishedAppTool(db.Model): description = db.Column(db.Text, nullable=False) # llm_description of the tool, for LLM llm_description = db.Column(db.Text, nullable=False) - # query description, query will be seem as a parameter of the tool, to describe this parameter to llm, we need this field + # query description, query will be seem as a parameter of the tool, + # to describe this parameter to llm, we need this field query_description = db.Column(db.Text, nullable=False) # query name, the name of the query parameter query_name = db.Column(db.String(40), nullable=False) diff --git a/api/models/workflow.py b/api/models/workflow.py index d52749f0ff4e5a..9c93ea4cea84e7 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -246,7 +246,8 @@ def environment_variables(self, value: Sequence[Variable]): if any(var for var in value if not var.id): raise ValueError("environment variable require a unique id") - # Compare inputs and origin variables, if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). + # Compare inputs and origin variables, + # if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). origin_variables_dictionary = {var.id: var for var in self.environment_variables} for i, variable in enumerate(value): if variable.id in origin_variables_dictionary and variable.value == HIDDEN_VALUE: diff --git a/api/poetry.lock b/api/poetry.lock index 103423e5c76b8b..6023f98e2ad5a1 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -10388,4 +10388,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "2dbff415c3c9ca95c8dcfb59fc088ce2c0d00037c44f386a34c87c98e1d8b942" +content-hash = "8179c7e3f91b5a00054e26297040b1969f59b37cb9a707fbaa9c2ea419954718" diff --git a/api/pyproject.toml b/api/pyproject.toml index 23e2b5c549aee0..da80692ccb8ea5 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -29,7 +29,6 @@ select = [ "W605", # invalid-escape-sequence ] ignore = [ - "E501", # line-too-long "E402", # module-import-not-at-top-of-file "E711", # none-comparison "E712", # true-false-comparison @@ -61,16 +60,19 @@ ignore = [ "F401", # unused-import "F811", # redefined-while-unused ] -"tests/*" = [ - "F401", # unused-import - "F811", # redefined-while-unused -] "configs/*" = [ "N802", # invalid-function-name ] "libs/gmpy2_pkcs10aep_cipher.py" = [ "N803", # invalid-argument-name ] +"migrations/versions/*" = [ + "E501", # line-too-long +] +"tests/*" = [ + "F401", # unused-import + "F811", # redefined-while-unused +] [tool.ruff.format] exclude = [ @@ -263,4 +265,4 @@ optional = true [tool.poetry.group.lint.dependencies] dotenv-linter = "~0.5.0" -ruff = "~0.6.1" +ruff = "~0.6.4" diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index 3ded9c0989b53c..6f6074f59640db 100644 --- a/api/services/tools/api_tools_manage_service.py +++ 
b/api/services/tools/api_tools_manage_service.py @@ -176,7 +176,8 @@ def get_api_tool_provider_remote_schema(user_id: str, tenant_id: str, url: str): get api tool provider remote schema """ headers = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", "Accept": "*/*", } diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py index 4138cdd40d822c..025913cb17a805 100644 --- a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py +++ b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py @@ -40,7 +40,7 @@ def create_embeddings( usage=Usage(prompt_tokens=2, total_tokens=2), ) - embeddings = "VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDr
j3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tuL5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6ivzz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGU
M8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlUvCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" + embeddings = 
"VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDrj3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tu
L5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6ivzz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGUM8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlUvCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u
86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" # noqa: E501 data = [] for i, text in enumerate(input): diff --git a/api/tests/unit_tests/core/app/segments/test_segment.py b/api/tests/unit_tests/core/app/segments/test_segment.py index 7cc339d21200a1..73002623f03dda 100644 --- a/api/tests/unit_tests/core/app/segments/test_segment.py +++ b/api/tests/unit_tests/core/app/segments/test_segment.py @@ -21,9 +21,9 @@ def test_segment_group_to_text(): segments_group = parser.convert_template(template=template, variable_pool=variable_pool) assert segments_group.text == "Hello, fake-user-id! Your query is fake-user-query. And your key is fake-secret-key." - assert ( - segments_group.log - == f"Hello, fake-user-id! Your query is fake-user-query. And your key is {encrypter.obfuscated_token('fake-secret-key')}." + assert segments_group.log == ( + f"Hello, fake-user-id! Your query is fake-user-query." + f" And your key is {encrypter.obfuscated_token('fake-secret-key')}." )
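With E501 removed from the global ignore list in pyproject.toml and kept only as a per-file exemption for migrations/versions/*, line length is enforced again across the API code base, which is what drives every rewrap and # noqa above. Purely as an illustrative aid, not part of the patch or of the project's tooling, a short script along these lines can flag lines that would still exceed a given limit; the 120-character figure is an assumption, the real limit lives in the project's Ruff configuration:

# Illustrative helper, not part of the patch.
from pathlib import Path

ASSUMED_LIMIT = 120  # assumption for this example; see the project's Ruff config

def overlong_lines(path: str, limit: int = ASSUMED_LIMIT) -> list[tuple[int, int]]:
    """Return (line_number, line_length) pairs for lines longer than limit."""
    text = Path(path).read_text(encoding="utf-8")
    return [
        (lineno, len(line))
        for lineno, line in enumerate(text.splitlines(), start=1)
        if len(line) > limit
    ]

# Example usage: overlong_lines("api/models/provider.py")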