Skip to content

Commit

Permalink
Merge branch 'langgenius:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
luckylhb90 authored Aug 30, 2024
2 parents 2dc25e0 + ceb2b15 commit 91b8055
Show file tree
Hide file tree
Showing 6 changed files with 92 additions and 9 deletions.
7 changes: 5 additions & 2 deletions api/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -559,8 +559,9 @@ def add_qdrant_doc_id_index(field: str):

@click.command("create-tenant", help="Create account and tenant.")
@click.option("--email", prompt=True, help="The email address of the tenant account.")
@click.option("--name", prompt=True, help="The workspace name of the tenant account.")
@click.option("--language", prompt=True, help="Account language, default: en-US.")
def create_tenant(email: str, language: Optional[str] = None):
def create_tenant(email: str, language: Optional[str] = None, name: Optional[str] = None):
"""
Create tenant account
"""
Expand All @@ -580,13 +581,15 @@ def create_tenant(email: str, language: Optional[str] = None):
if language not in languages:
language = "en-US"

name = name.strip()

# generate random password
new_password = secrets.token_urlsafe(16)

# register account
account = RegisterService.register(email=email, name=account_name, password=new_password, language=language)

TenantService.create_owner_tenant_if_not_exist(account)
TenantService.create_owner_tenant_if_not_exist(account, name)

click.echo(
click.style(
Expand Down
39 changes: 39 additions & 0 deletions api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
model: glm-4-plus
label:
  en_US: glm-4-plus
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
parameter_rules:
  - name: temperature
    use_template: temperature
    default: 0.95
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
      en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
  - name: top_p
    use_template: top_p
    default: 0.7
    help:
      zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
      en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
  - name: incremental
    label:
      zh_Hans: 增量返回
      en_US: Incremental
    type: boolean
    help:
      zh_Hans: SSE接口调用时,用于控制每次返回内容方式是增量还是全量,不提供此参数时默认为增量返回,true 为增量返回,false 为全量返回。
      en_US: When the SSE interface is called, it is used to control whether the content is returned incrementally or in full. If this parameter is not provided, the default is incremental return. true means incremental return, false means full return.
    required: false
  - name: max_tokens
    use_template: max_tokens
    default: 1024
    min: 1
    max: 8192
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
model: glm-4v-plus
label:
  en_US: glm-4v-plus
model_type: llm
model_properties:
  mode: chat
features:
  - vision
parameter_rules:
  - name: temperature
    use_template: temperature
    default: 0.95
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
      en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
  - name: top_p
    use_template: top_p
    default: 0.7
    help:
      zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
      en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
  - name: incremental
    label:
      zh_Hans: 增量返回
      en_US: Incremental
    type: boolean
    help:
      zh_Hans: SSE接口调用时,用于控制每次返回内容方式是增量还是全量,不提供此参数时默认为增量返回,true 为增量返回,false 为全量返回。
      en_US: When the SSE interface is called, it is used to control whether the content is returned incrementally or in full. If this parameter is not provided, the default is incremental return. true means incremental return, false means full return.
    required: false
  - name: max_tokens
    use_template: max_tokens
    default: 1024
    min: 1
    max: 8192
9 changes: 5 additions & 4 deletions api/core/model_runtime/model_providers/zhipuai/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,8 @@ def _generate(self, model: str, credentials_kwargs: dict,
:return: full response or stream response chunk generator result
"""
extra_model_kwargs = {}
if stop:
# request to glm-4v-plus with stop words will always response "finish_reason":"network_error"
if stop and model!= 'glm-4v-plus':
extra_model_kwargs['stop'] = stop

client = ZhipuAI(
Expand All @@ -174,7 +175,7 @@ def _generate(self, model: str, credentials_kwargs: dict,
if copy_prompt_message.role in [PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL]:
if isinstance(copy_prompt_message.content, list):
# check if model is 'glm-4v'
if model != 'glm-4v':
if model not in ('glm-4v', 'glm-4v-plus'):
# not support list message
continue
# get image and
Expand Down Expand Up @@ -207,7 +208,7 @@ def _generate(self, model: str, credentials_kwargs: dict,
else:
new_prompt_messages.append(copy_prompt_message)

if model == 'glm-4v':
if model == 'glm-4v' or model == 'glm-4v-plus':
params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
else:
params = {
Expand Down Expand Up @@ -304,7 +305,7 @@ def _construct_glm_4v_parameter(self, model: str, prompt_messages: list[PromptMe

return params

def _construct_glm_4v_messages(self, prompt_message: Union[str | list[PromptMessageContent]]) -> list[dict]:
def _construct_glm_4v_messages(self, prompt_message: Union[str, list[PromptMessageContent]]) -> list[dict]:
if isinstance(prompt_message, str):
return [{'type': 'text', 'text': prompt_message}]

Expand Down
7 changes: 5 additions & 2 deletions api/services/account_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ def create_tenant(name: str) -> Tenant:
return tenant

@staticmethod
def create_owner_tenant_if_not_exist(account: Account):
def create_owner_tenant_if_not_exist(account: Account, name: Optional[str] = None):
"""Create owner tenant if not exist"""
available_ta = (
TenantAccountJoin.query.filter_by(account_id=account.id).order_by(TenantAccountJoin.id.asc()).first()
Expand All @@ -274,7 +274,10 @@ def create_owner_tenant_if_not_exist(account: Account):
if available_ta:
return

tenant = TenantService.create_tenant(f"{account.name}'s Workspace")
if name:
tenant = TenantService.create_tenant(name)
else:
tenant = TenantService.create_tenant(f"{account.name}'s Workspace")
TenantService.create_tenant_member(tenant, account, role="owner")
account.current_tenant = tenant
db.session.commit()
Expand Down
2 changes: 1 addition & 1 deletion web/i18n/zh-Hans/workflow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ const translation = {
},
env: {
envPanelTitle: '环境变量',
envDescription: '环境变量是一种存储敏感信息的方法,如 API 密钥、数据库密码等。它们被存储在工作流程中,而不是代码中,以便在不同环墋中共享。',
envDescription: '环境变量是一种存储敏感信息的方法,如 API 密钥、数据库密码等。它们被存储在工作流程中,而不是代码中,以便在不同环境中共享。',
envPanelButton: '添加环境变量',
modal: {
title: '添加环境变量',
Expand Down

0 comments on commit 91b8055

Please sign in to comment.