Skip to content

Commit

Permalink
chore: fix unnecessary string concatenation in single line (#8311)
Browse files Browse the repository at this point in the history
  • Loading branch information
bowenliang123 authored Sep 13, 2024
1 parent 08c4864 commit 6613b8f
Show file tree
Hide file tree
Showing 30 changed files with 46 additions and 49 deletions.
9 changes: 4 additions & 5 deletions api/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def reset_email(email, new_email, email_confirm):
)
@click.confirmation_option(
prompt=click.style(
"Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red"
"Are you sure you want to reset encrypt key pair? this operation cannot be rolled back!", fg="red"
)
)
def reset_encrypt_key_pair():
Expand All @@ -131,7 +131,7 @@ def reset_encrypt_key_pair():

click.echo(
click.style(
"Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id),
"Congratulations! The asymmetric key pair of workspace {} has been reset.".format(tenant.id),
fg="green",
)
)
Expand Down Expand Up @@ -275,8 +275,7 @@ def migrate_knowledge_vector_database():
for dataset in datasets:
total_count = total_count + 1
click.echo(
f"Processing the {total_count} dataset {dataset.id}. "
+ f"{create_count} created, {skipped_count} skipped."
f"Processing the {total_count} dataset {dataset.id}. {create_count} created, {skipped_count} skipped."
)
try:
click.echo("Create dataset vdb index: {}".format(dataset.id))
Expand Down Expand Up @@ -594,7 +593,7 @@ def create_tenant(email: str, language: Optional[str] = None, name: Optional[str

click.echo(
click.style(
"Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password),
"Congratulations! Account and tenant created.\nAccount: {}\nPassword: {}".format(email, new_password),
fg="green",
)
)
Expand Down
6 changes: 3 additions & 3 deletions api/configs/feature/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,12 +129,12 @@ class EndpointConfig(BaseSettings):
)

SERVICE_API_URL: str = Field(
description="Service API Url prefix." "used to display Service API Base Url to the front-end.",
description="Service API Url prefix. used to display Service API Base Url to the front-end.",
default="",
)

APP_WEB_URL: str = Field(
description="WebApp Url prefix." "used to display WebAPP API Base Url to the front-end.",
description="WebApp Url prefix. used to display WebAPP API Base Url to the front-end.",
default="",
)

Expand Down Expand Up @@ -272,7 +272,7 @@ class LoggingConfig(BaseSettings):
"""

LOG_LEVEL: str = Field(
description="Log output level, default to INFO." "It is recommended to set it to ERROR for production.",
description="Log output level, default to INFO. It is recommended to set it to ERROR for production.",
default="INFO",
)

Expand Down
2 changes: 1 addition & 1 deletion api/controllers/console/app/workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -465,6 +465,6 @@ def post(self, app_model: App):
api.add_resource(PublishedWorkflowApi, "/apps/<uuid:app_id>/workflows/publish")
api.add_resource(DefaultBlockConfigsApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs")
api.add_resource(
DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs" "/<string:block_type>"
DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs/<string:block_type>"
)
api.add_resource(ConvertToWorkflowApi, "/apps/<uuid:app_id>/convert-to-workflow")
2 changes: 1 addition & 1 deletion api/controllers/console/datasets/datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -399,7 +399,7 @@ def post(self):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
Expand Down
4 changes: 1 addition & 3 deletions api/controllers/console/error.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,7 @@ class NotSetupError(BaseHTTPException):

class NotInitValidateError(BaseHTTPException):
error_code = "not_init_validated"
description = (
"Init validation has not been completed yet. " "Please proceed with the init validation process first."
)
description = "Init validation has not been completed yet. Please proceed with the init validation process first."
code = 401


Expand Down
2 changes: 1 addition & 1 deletion api/controllers/console/workspace/model_providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ def get(self, provider: str):
api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<string:provider>/credentials/validate")
api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<string:provider>")
api.add_resource(
ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/" "<string:icon_type>/<string:lang>"
ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/<string:icon_type>/<string:lang>"
)

api.add_resource(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def validate_and_set_defaults(cls, app_mode: AppMode, config: dict) -> tuple[dic
if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED.value:
if not config["chat_prompt_config"] and not config["completion_prompt_config"]:
raise ValueError(
"chat_prompt_config or completion_prompt_config is required " "when prompt_type is advanced"
"chat_prompt_config or completion_prompt_config is required when prompt_type is advanced"
)

model_mode_vals = [mode.value for mode in ModelMode]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ def validate_variables_and_set_defaults(cls, config: dict) -> tuple[dict, list[s

pattern = re.compile(r"^(?!\d)[\u4e00-\u9fa5A-Za-z0-9_\U0001F300-\U0001F64F\U0001F680-\U0001F6FF]{1,100}$")
if pattern.match(form_item["variable"]) is None:
raise ValueError("variable in user_input_form must be a string, " "and cannot start with a number")
raise ValueError("variable in user_input_form must be a string, and cannot start with a number")

variables.append(form_item["variable"])

Expand Down
2 changes: 1 addition & 1 deletion api/core/app/apps/base_app_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ def check_hosting_moderation(
queue_manager=queue_manager,
app_generate_entity=application_generate_entity,
prompt_messages=prompt_messages,
text="I apologize for any confusion, " "but I'm an AI assistant to be helpful, harmless, and honest.",
text="I apologize for any confusion, but I'm an AI assistant to be helpful, harmless, and honest.",
stream=application_generate_entity.stream,
)

Expand Down
6 changes: 3 additions & 3 deletions api/core/app/apps/workflow_logging_callback.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def on_workflow_node_execute_succeeded(self, event: NodeRunSucceededEvent) -> No
if route_node_state.node_run_result:
node_run_result = route_node_state.node_run_result
self.print_text(
f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
color="green",
)
self.print_text(
Expand Down Expand Up @@ -116,7 +116,7 @@ def on_workflow_node_execute_failed(self, event: NodeRunFailedEvent) -> None:
node_run_result = route_node_state.node_run_result
self.print_text(f"Error: {node_run_result.error}", color="red")
self.print_text(
f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
color="red",
)
self.print_text(
Expand All @@ -125,7 +125,7 @@ def on_workflow_node_execute_failed(self, event: NodeRunFailedEvent) -> None:
color="red",
)
self.print_text(
f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
color="red",
)

Expand Down
2 changes: 1 addition & 1 deletion api/core/model_runtime/model_providers/__base/ai_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ def predefined_models(self) -> list[AIModelEntity]:
except Exception as e:
model_schema_yaml_file_name = os.path.basename(model_schema_yaml_path).rstrip(".yaml")
raise Exception(
f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}:" f" {str(e)}"
f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}: {str(e)}"
)

# cache model schema
Expand Down
2 changes: 1 addition & 1 deletion api/core/model_runtime/model_providers/cohere/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -621,7 +621,7 @@ def _convert_tools(self, tools: list[PromptMessageTool]) -> list[Tool]:

desc = p_val["description"]
if "enum" in p_val:
desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"

parameter_definitions[p_key] = ToolParameterDefinitionsValue(
description=desc, type=p_val["type"], required=required
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None:

if credentials["task_type"] not in ("text2text-generation", "text-generation"):
raise CredentialsValidateFailedError(
"Huggingface Hub Task Type must be one of text2text-generation, " "text-generation."
"Huggingface Hub Task Type must be one of text2text-generation, text-generation."
)

client = InferenceClient(token=credentials["huggingfacehub_api_token"])
Expand Down Expand Up @@ -282,7 +282,7 @@ def _get_hosted_model_task_type(huggingfacehub_api_token: str, model_name: str):

valid_tasks = ("text2text-generation", "text-generation")
if model_info.pipeline_tag not in valid_tasks:
raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
except Exception as e:
raise CredentialsValidateFailedError(f"{str(e)}")

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ def _check_hosted_model_task_type(huggingfacehub_api_token: str, model_name: str

valid_tasks = "feature-extraction"
if model_info.pipeline_tag not in valid_tasks:
raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
except Exception as e:
raise CredentialsValidateFailedError(f"{str(e)}")

Expand Down
4 changes: 2 additions & 2 deletions api/core/model_runtime/model_providers/ollama/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -572,7 +572,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
label=I18nObject(en_US="Size of context window"),
type=ParameterType.INT,
help=I18nObject(
en_US="Sets the size of the context window used to generate the next token. " "(Default: 2048)"
en_US="Sets the size of the context window used to generate the next token. (Default: 2048)"
),
default=2048,
min=1,
Expand Down Expand Up @@ -650,7 +650,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
label=I18nObject(en_US="Format"),
type=ParameterType.STRING,
help=I18nObject(
en_US="the format to return a response in." " Currently the only accepted value is json."
en_US="the format to return a response in. Currently the only accepted value is json."
),
options=["json"],
),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None:

if model.count("/") != 1:
raise CredentialsValidateFailedError(
"Replicate Model Name must be provided, " "format: {user_name}/{model_name}"
"Replicate Model Name must be provided, format: {user_name}/{model_name}"
)

try:
Expand Down
2 changes: 1 addition & 1 deletion api/core/model_runtime/model_providers/tongyi/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -472,7 +472,7 @@ def _convert_tools(self, tools: list[PromptMessageTool]) -> list[dict]:
for p_key, p_val in properties.items():
desc = p_val["description"]
if "enum" in p_val:
desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"

properties_definitions[p_key] = {
"description": desc,
Expand Down
2 changes: 1 addition & 1 deletion api/core/rag/datasource/vdb/relyt/relyt_vector.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ def similarity_search_with_score_by_vector(
try:
from sqlalchemy.engine import Row
except ImportError:
raise ImportError("Could not import Row from sqlalchemy.engine. " "Please 'pip install sqlalchemy>=1.4'.")
raise ImportError("Could not import Row from sqlalchemy.engine. Please 'pip install sqlalchemy>=1.4'.")

filter_condition = ""
if filter is not None:
Expand Down
2 changes: 1 addition & 1 deletion api/core/rag/docstore/dataset_docstore.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def add_documents(self, docs: Sequence[Document], allow_update: bool = True) ->
# NOTE: doc could already exist in the store, but we overwrite it
if not allow_update and segment_document:
raise ValueError(
f"doc_id {doc.metadata['doc_id']} already exists. " "Set allow_update to True to overwrite."
f"doc_id {doc.metadata['doc_id']} already exists. Set allow_update to True to overwrite."
)

# calc embedding use tokens
Expand Down
2 changes: 1 addition & 1 deletion api/core/rag/extractor/notion_extractor.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def __init__(
integration_token = dify_config.NOTION_INTEGRATION_TOKEN
if integration_token is None:
raise ValueError(
"Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`."
"Must specify `integration_token` or set environment variable `NOTION_INTEGRATION_TOKEN`."
)

self._notion_access_token = integration_token
Expand Down
6 changes: 3 additions & 3 deletions api/core/rag/splitter/text_splitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def __init__(
"""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size " f"({chunk_size}), should be smaller."
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size ({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
Expand Down Expand Up @@ -117,7 +117,7 @@ def _merge_splits(self, splits: Iterable[str], separator: str, lengths: list[int
if total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size:
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, " f"which is longer than the specified {self._chunk_size}"
f"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
Expand Down Expand Up @@ -153,7 +153,7 @@ def _huggingface_tokenizer_length(text: str) -> int:

except ImportError:
raise ValueError(
"Could not import transformers python package. " "Please install it with `pip install transformers`."
"Could not import transformers python package. Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)

Expand Down
2 changes: 1 addition & 1 deletion api/core/tools/provider/builtin/gaode/gaode.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def _validate_credentials(self, credentials: dict) -> None:

try:
response = requests.get(
url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}" "".format(
url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}".format(
address=urllib.parse.quote("广东省广州市天河区广州塔"), apikey=credentials.get("api_key")
)
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def _invoke(
city_response = s.request(
method="GET",
headers={"Content-Type": "application/json; charset=utf-8"},
url="{url}/config/district?keywords={keywords}" "&subdistrict=0&extensions=base&key={apikey}" "".format(
url="{url}/config/district?keywords={keywords}&subdistrict=0&extensions=base&key={apikey}".format(
url=api_domain, keywords=city, apikey=self.runtime.credentials.get("api_key")
),
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def _invoke(
response = s.request(
method="GET",
headers=headers,
url=f"{api_domain}/search/repositories?" f"q={quote(query)}&sort=stars&per_page={top_n}&order=desc",
url=f"{api_domain}/search/repositories?q={quote(query)}&sort=stars&per_page={top_n}&order=desc",
)
response_data = response.json()
if response.status_code == 200 and isinstance(response_data.get("items"), list):
Expand Down
4 changes: 2 additions & 2 deletions api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ def run(self, query: str) -> str:
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['pub_date']}\nTitle: {result['title']}\n" f"Summary: {result['summary']}"
f"Published: {result['pub_date']}\nTitle: {result['title']}\nSummary: {result['summary']}"
for result in self.load(query[: self.ARXIV_MAX_QUERY_LENGTH])
]

Expand Down Expand Up @@ -97,7 +97,7 @@ def retrieve_article(self, uid: str, webenv: str) -> dict:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests error
# wait for an exponentially increasing amount of time
print(f"Too Many Requests, " f"waiting for {self.sleep_time:.2f} seconds...")
print(f"Too Many Requests, waiting for {self.sleep_time:.2f} seconds...")
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def set_validator(cls, values: dict) -> dict:
try:
from twilio.rest import Client
except ImportError:
raise ImportError("Could not import twilio python package. " "Please install it with `pip install twilio`.")
raise ImportError("Could not import twilio python package. Please install it with `pip install twilio`.")
account_sid = values.get("account_sid")
auth_token = values.get("auth_token")
values["from_number"] = values.get("from_number")
Expand Down
2 changes: 1 addition & 1 deletion api/libs/json_in_md_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,6 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
for key in expected_keys:
if key not in json_obj:
raise OutputParserError(
f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}"
f"Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"
)
return json_obj
Loading

0 comments on commit 6613b8f

Please sign in to comment.