Add Fireworks AI as new model provider (#8428)
yaoice authored Sep 22, 2024
1 parent c8b9bde commit 0665268
Showing 31 changed files with 1,683 additions and 2 deletions.
1 change: 1 addition & 0 deletions api/core/model_runtime/model_providers/_position.yaml
@@ -37,3 +37,4 @@
- siliconflow
- perfxcloud
- zhinao
- fireworks
Empty file.
52 changes: 52 additions & 0 deletions api/core/model_runtime/model_providers/fireworks/_common.py
@@ -0,0 +1,52 @@
from collections.abc import Mapping

import openai

from core.model_runtime.errors.invoke import (
    InvokeAuthorizationError,
    InvokeBadRequestError,
    InvokeConnectionError,
    InvokeError,
    InvokeRateLimitError,
    InvokeServerUnavailableError,
)


class _CommonFireworks:
    def _to_credential_kwargs(self, credentials: Mapping) -> dict:
        """
        Transform credentials to kwargs for model instance

        :param credentials: provider credentials
        :return: keyword arguments for the OpenAI-compatible client
        """
        credentials_kwargs = {
            "api_key": credentials["fireworks_api_key"],
            "base_url": "https://api.fireworks.ai/inference/v1",
            "max_retries": 1,
        }

        return credentials_kwargs

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error.
        The key is the error type thrown to the caller;
        the value is the error type thrown by the model,
        which needs to be converted into a unified error type for the caller.

        :return: Invoke error mapping
        """
        return {
            InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
            InvokeServerUnavailableError: [openai.InternalServerError],
            InvokeRateLimitError: [openai.RateLimitError],
            InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
            InvokeBadRequestError: [
                openai.BadRequestError,
                openai.NotFoundError,
                openai.UnprocessableEntityError,
                openai.APIError,
            ],
        }
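
Since `_to_credential_kwargs` targets Fireworks' OpenAI-compatible endpoint, its output can be passed straight to the `openai` client. A minimal usage sketch, assuming the `openai>=1.x` SDK; the API key and prompt are placeholders, not part of this commit:

import openai

from core.model_runtime.model_providers.fireworks._common import _CommonFireworks

# Build an OpenAI-compatible client from the provider credentials
# (placeholder key) and send one chat completion to Fireworks.
credentials = {"fireworks_api_key": "fw-..."}
client = openai.OpenAI(**_CommonFireworks()._to_credential_kwargs(credentials))

response = client.chat.completions.create(
    model="accounts/fireworks/models/llama-v3p1-8b-instruct",
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=16,
)
print(response.choices[0].message.content)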
27 changes: 27 additions & 0 deletions api/core/model_runtime/model_providers/fireworks/fireworks.py
@@ -0,0 +1,27 @@
import logging

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


class FireworksProvider(ModelProvider):
    def validate_provider_credentials(self, credentials: dict) -> None:
        """
        Validate provider credentials.
        If validation fails, raise an exception.

        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
        """
        try:
            model_instance = self.get_model_instance(ModelType.LLM)
            model_instance.validate_credentials(
                model="accounts/fireworks/models/llama-v3p1-8b-instruct", credentials=credentials
            )
        except CredentialsValidateFailedError as ex:
            raise ex
        except Exception as ex:
            logger.exception(f"{self.get_provider_schema().provider} credentials validate failed")
            raise ex
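
Within the Dify model runtime this check runs when the provider credentials are saved; a minimal sketch of exercising it directly, assuming the `fireworks` provider schema is discoverable on the import path and using a placeholder key:

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.fireworks.fireworks import FireworksProvider

provider = FireworksProvider()
try:
    # Issues a one-shot call against llama-v3p1-8b-instruct, as in the method above.
    provider.validate_provider_credentials({"fireworks_api_key": "fw-..."})
except CredentialsValidateFailedError as ex:
    print(f"Fireworks credential validation failed: {ex}")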
29 changes: 29 additions & 0 deletions api/core/model_runtime/model_providers/fireworks/fireworks.yaml
@@ -0,0 +1,29 @@
provider: fireworks
label:
  zh_Hans: Fireworks AI
  en_US: Fireworks AI
icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#FCFDFF"
help:
  title:
    en_US: Get your API Key from Fireworks AI
    zh_Hans: 从 Fireworks AI 获取 API Key
  url:
    en_US: https://fireworks.ai/account/api-keys
supported_model_types:
  - llm
configurate_methods:
  - predefined-model
provider_credential_schema:
  credential_form_schemas:
    - variable: fireworks_api_key
      label:
        en_US: API Key
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
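
The form above yields a single secret, `fireworks_api_key`, which is exactly the key that `_to_credential_kwargs` in `_common.py` reads. A sketch of the credentials mapping the form produces (the value is a placeholder):

# Credentials dict as produced by the provider credential form; the value is a placeholder.
credentials = {
    "fireworks_api_key": "fw-xxxxxxxxxxxxxxxx",
}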
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
- llama-v3p1-405b-instruct
- llama-v3p1-70b-instruct
- llama-v3p1-8b-instruct
- llama-v3-70b-instruct
- mixtral-8x22b-instruct
- mixtral-8x7b-instruct
- firefunction-v2
- firefunction-v1
- gemma2-9b-it
- llama-v3-70b-instruct-hf
- llama-v3-8b-instruct
- llama-v3-8b-instruct-hf
- mixtral-8x7b-instruct-hf
- mythomax-l2-13b
- phi-3-vision-128k-instruct
- yi-large
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
model: accounts/fireworks/models/firefunction-v1
label:
  zh_Hans: Firefunction V1
  en_US: Firefunction V1
model_type: llm
features:
  - agent-thought
  - tool-call
model_properties:
  mode: chat
  context_size: 32768
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  input: '0.5'
  output: '0.5'
  unit: '0.000001'
  currency: USD
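
Because Fireworks is driven through its OpenAI-compatible API, the templated rules (temperature, top_p, max_tokens, response_format) map onto standard request parameters, while Fireworks-specific rules such as top_k and context_length_exceeded_behavior would need to travel in the request body. A hedged sketch using the openai SDK's extra_body passthrough (this exact wiring is an assumption, not shown in the commit); the key is a placeholder:

import openai

client = openai.OpenAI(
    api_key="fw-...",  # placeholder
    base_url="https://api.fireworks.ai/inference/v1",
)
response = client.chat.completions.create(
    model="accounts/fireworks/models/firefunction-v1",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    temperature=0.7,   # use_template: temperature
    top_p=0.9,         # use_template: top_p
    max_tokens=256,    # use_template: max_tokens
    extra_body={
        "top_k": 40,                                     # provider-specific rule above
        "context_length_exceeded_behavior": "truncate",  # options: None / truncate / error
    },
)
print(response.choices[0].message.content)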
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
model: accounts/fireworks/models/firefunction-v2
label:
  zh_Hans: Firefunction V2
  en_US: Firefunction V2
model_type: llm
features:
  - agent-thought
  - tool-call
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  input: '0.9'
  output: '0.9'
  unit: '0.000001'
  currency: USD
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
model: accounts/fireworks/models/gemma2-9b-it
label:
  zh_Hans: Gemma2 9B Instruct
  en_US: Gemma2 9B Instruct
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  input: '0.2'
  output: '0.2'
  unit: '0.000001'
  currency: USD
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
model: accounts/fireworks/models/llama-v3-70b-instruct-hf
label:
  zh_Hans: Llama3 70B Instruct (HF version)
  en_US: Llama3 70B Instruct (HF version)
model_type: llm
features:
  - agent-thought
  - tool-call
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  input: '0.9'
  output: '0.9'
  unit: '0.000001'
  currency: USD
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
model: accounts/fireworks/models/llama-v3-70b-instruct
label:
  zh_Hans: Llama3 70B Instruct
  en_US: Llama3 70B Instruct
model_type: llm
features:
  - agent-thought
  - tool-call
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  input: '0.9'
  output: '0.9'
  unit: '0.000001'
  currency: USD