diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 13b1313..164f43e 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -9,8 +9,8 @@ sources: - main mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 - sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca + sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e + sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be tags: - latest - main @@ -32,8 +32,10 @@ targets: mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:6d8dfe3442be38fdac56eadabec39bcbe9e2d652080802d9cc46a68c28b38dbc - sourceBlobDigest: sha256:a4c011f461c73809a7d6cf1c9823d3c51d5050895aad246287ff14ac971efb8c + sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e + sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be + codeSamplesNamespace: mistral-openapi-google-cloud-code-samples + codeSamplesRevisionDigest: sha256:5126fed27553befa9735d0e0c2abdc089d6f6a22efe89475df630e375b496241 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/mistralai-gcp/.speakeasy/gen.lock b/packages/mistralai-gcp/.speakeasy/gen.lock index 6ac5422..6fd4fef 100644 --- a/packages/mistralai-gcp/.speakeasy/gen.lock +++ b/packages/mistralai-gcp/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: c6044247-eaf9-46da-b078-0e1334e93be2 management: - docChecksum: 823d9b94fcb9c6588d0af16b7301f4ac + docChecksum: 46baf8da7636ea1bf44557571d011045 docVersion: 0.0.2 - speakeasyVersion: 1.399.3 - generationVersion: 2.420.2 - releaseVersion: 1.1.0 - configChecksum: 814cc68b27946d6b8e7bc99c8a2cbf1b + speakeasyVersion: 1.434.7 + generationVersion: 2.452.0 + releaseVersion: 1.3.0 + configChecksum: 3b3800ab2f8b198bdbd47807fbe76770 repoURL: https://github.com/mistralai/client-ts.git repoSubDirectory: packages/mistralai-gcp installationURL: https://gitpkg.now.sh/mistralai/client-ts/packages/mistralai-gcp @@ -15,10 +15,10 @@ features: typescript: additionalDependencies: 0.1.0 constsAndDefaults: 0.1.11 - core: 3.17.5 + core: 3.18.3 defaultEnabledRetries: 0.1.0 enumUnions: 0.1.0 - envVarSecurityUsage: 0.1.1 + envVarSecurityUsage: 0.1.2 examples: 2.81.4 globalSecurity: 2.82.11 globalSecurityCallbacks: 0.1.0 @@ -29,11 +29,12 @@ features: openEnums: 0.1.1 responseFormat: 0.2.3 retries: 2.83.0 - sdkHooks: 0.1.0 - serverEvents: 0.1.4 + sdkHooks: 0.2.0 + serverEvents: 0.1.5 serverEventsSentinels: 0.1.0 serverIDs: 2.81.2 - unions: 2.85.7 + tests: 0.7.0 + unions: 2.85.8 generatedFiles: - .eslintrc.cjs - .gitattributes @@ -43,6 +44,7 @@ generatedFiles: - docs/lib/utils/retryconfig.md - docs/models/components/arguments.md - docs/models/components/assistantmessage.md + - docs/models/components/assistantmessagecontent.md - docs/models/components/assistantmessagerole.md - docs/models/components/chatcompletionchoice.md - docs/models/components/chatcompletionchoicefinishreason.md @@ -76,6 +78,7 @@ generatedFiles: - docs/models/components/security.md - docs/models/components/stop.md - docs/models/components/systemmessage.md + - docs/models/components/systemmessagecontent.md - docs/models/components/textchunk.md - docs/models/components/tool.md - 
docs/models/components/toolcall.md diff --git a/packages/mistralai-gcp/.speakeasy/gen.yaml b/packages/mistralai-gcp/.speakeasy/gen.yaml index c0709fc..81014db 100644 --- a/packages/mistralai-gcp/.speakeasy/gen.yaml +++ b/packages/mistralai-gcp/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false typescript: - version: 1.1.0 + version: 1.3.0 additionalDependencies: dependencies: google-auth-library: ^9.11.0 @@ -23,6 +24,7 @@ typescript: clientServerStatusCodesAsErrors: true enumFormat: union flattenGlobalSecurity: true + flatteningOrder: body-first imports: option: openapi paths: diff --git a/packages/mistralai-gcp/FUNCTIONS.md b/packages/mistralai-gcp/FUNCTIONS.md index 08bb286..3a89845 100644 --- a/packages/mistralai-gcp/FUNCTIONS.md +++ b/packages/mistralai-gcp/FUNCTIONS.md @@ -60,7 +60,7 @@ async function run() { const { value: result } = res; // Handle the result - console.log(result) + console.log(result); } run(); diff --git a/packages/mistralai-gcp/RELEASES.md b/packages/mistralai-gcp/RELEASES.md index 298ca1c..4141196 100644 --- a/packages/mistralai-gcp/RELEASES.md +++ b/packages/mistralai-gcp/RELEASES.md @@ -48,4 +48,14 @@ Based on: ### Generated - [typescript v1.1.0] packages/mistralai-gcp ### Releases -- [NPM v1.1.0] https://www.npmjs.com/package/@mistralai/mistralai-gcp/v/1.1.0 - packages/mistralai-gcp \ No newline at end of file +- [NPM v1.1.0] https://www.npmjs.com/package/@mistralai/mistralai-gcp/v/1.1.0 - packages/mistralai-gcp + +## 2024-11-08 13:01:40 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.434.7 (2.452.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v1.3.0] packages/mistralai-gcp +### Releases +- [NPM v1.3.0] https://www.npmjs.com/package/@mistralai/mistralai-gcp/v/1.3.0 - packages/mistralai-gcp \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/arguments.md b/packages/mistralai-gcp/docs/models/components/arguments.md index 63fac53..ef0bdc6 100644 --- a/packages/mistralai-gcp/docs/models/components/arguments.md +++ b/packages/mistralai-gcp/docs/models/components/arguments.md @@ -1,24 +1,19 @@ # Arguments -## Example Usage - -```typescript -import { Arguments } from "@mistralai/mistralai-gcp/models/components"; - -let value: Arguments = ""; -``` ## Supported Types ### `{ [k: string]: any }` ```typescript -const value: { [k: string]: any } = /* values here */ +const value: { [k: string]: any } = { + "key": "", +}; ``` ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` diff --git a/packages/mistralai-gcp/docs/models/components/assistantmessage.md b/packages/mistralai-gcp/docs/models/components/assistantmessage.md index cd15de7..06815be 100644 --- a/packages/mistralai-gcp/docs/models/components/assistantmessage.md +++ b/packages/mistralai-gcp/docs/models/components/assistantmessage.md @@ -10,9 +10,9 @@ let value: AssistantMessage = {}; ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *string* | :heavy_minus_sign: | N/A | -| `toolCalls` | [components.ToolCall](../../models/components/toolcall.md)[] | :heavy_minus_sign: | N/A | -| `prefix` | *boolean* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [components.AssistantMessageRole](../../models/components/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `content` | *components.AssistantMessageContent* | :heavy_minus_sign: | N/A | +| `toolCalls` | [components.ToolCall](../../models/components/toolcall.md)[] | :heavy_minus_sign: | N/A | +| `prefix` | *boolean* | :heavy_minus_sign: | N/A | +| `role` | [components.AssistantMessageRole](../../models/components/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/assistantmessagecontent.md b/packages/mistralai-gcp/docs/models/components/assistantmessagecontent.md new file mode 100644 index 0000000..67d0370 --- /dev/null +++ b/packages/mistralai-gcp/docs/models/components/assistantmessagecontent.md @@ -0,0 +1,21 @@ +# AssistantMessageContent + + +## Supported Types + +### `string` + +```typescript +const value: string = ""; +``` + +### `components.ContentChunk[]` + +```typescript +const value: components.ContentChunk[] = [ + { + text: "", + }, +]; +``` + diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionchoicefinishreason.md b/packages/mistralai-gcp/docs/models/components/chatcompletionchoicefinishreason.md index 93ef7b4..b898e85 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionchoicefinishreason.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionchoicefinishreason.md @@ -10,6 +10,8 @@ let value: ChatCompletionChoiceFinishReason = "stop"; ## Values +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
+ ```typescript -"stop" | "length" | "model_length" | "error" | "tool_calls" +"stop" | "length" | "model_length" | "error" | "tool_calls" | Unrecognized ``` \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md b/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md index 3e9a7f4..2b9f600 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md @@ -18,17 +18,19 @@ let value: ChatCompletionRequest = { ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `minTokens` | *number* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | -| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | *components.ChatCompletionRequestStop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | *components.ChatCompletionRequestMessages*[] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `responseFormat` | [components.ResponseFormat](../../models/components/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | [components.Tool](../../models/components/tool.md)[] | :heavy_minus_sign: | N/A | | -| `toolChoice` | *components.ChatCompletionRequestToolChoice* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | *components.ChatCompletionRequestStop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | *components.ChatCompletionRequestMessages*[] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `responseFormat` | [components.ResponseFormat](../../models/components/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | [components.Tool](../../models/components/tool.md)[] | :heavy_minus_sign: | N/A | | +| `toolChoice` | *components.ChatCompletionRequestToolChoice* | :heavy_minus_sign: | N/A | | +| `presencePenalty` | *number* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *number* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionrequestmessages.md b/packages/mistralai-gcp/docs/models/components/chatcompletionrequestmessages.md index 17342b5..63c51e9 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionrequestmessages.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionrequestmessages.md @@ -1,42 +1,39 @@ # ChatCompletionRequestMessages -## Example Usage - -```typescript -import { ChatCompletionRequestMessages } from "@mistralai/mistralai-gcp/models/components"; - -let value: ChatCompletionRequestMessages = { - content: [ - { - text: "", - }, - ], -}; -``` ## Supported Types ### `components.AssistantMessage` ```typescript -const value: components.AssistantMessage = /* values here */ +const value: components.AssistantMessage = {}; ``` ### `components.SystemMessage` ```typescript -const value: components.SystemMessage = /* values here */ +const value: components.SystemMessage = { + content: [ + { + text: "", + }, + ], +}; ``` ### `components.ToolMessage` ```typescript -const value: components.ToolMessage = /* values here */ +const value: components.ToolMessage = { + content: "", +}; ``` ### `components.UserMessage` ```typescript -const value: components.UserMessage = /* values here */ +const value: components.UserMessage = { + content: "", +}; ``` diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionrequeststop.md b/packages/mistralai-gcp/docs/models/components/chatcompletionrequeststop.md index 14eeca0..5cf805d 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionrequeststop.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionrequeststop.md @@ -2,27 +2,20 @@ Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array -## Example Usage - -```typescript -import { ChatCompletionRequestStop } from "@mistralai/mistralai-gcp/models/components"; - -let value: ChatCompletionRequestStop = [ - "", -]; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` ### `string[]` ```typescript -const value: string[] = /* values here */ +const value: string[] = [ + "", +]; ``` diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionrequesttoolchoice.md b/packages/mistralai-gcp/docs/models/components/chatcompletionrequesttoolchoice.md index a19effd..8750103 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionrequesttoolchoice.md @@ -1,24 +1,21 @@ # ChatCompletionRequestToolChoice -## Example Usage - -```typescript -import { ChatCompletionRequestToolChoice } from "@mistralai/mistralai-gcp/models/components"; - -let value: ChatCompletionRequestToolChoice = "auto"; -``` ## Supported Types ### `components.ToolChoice` ```typescript -const value: components.ToolChoice = /* values here */ +const value: components.ToolChoice = { + function: { + name: "", + }, +}; ``` ### `components.ToolChoiceEnum` ```typescript -const value: components.ToolChoiceEnum = /* values here */ +const value: components.ToolChoiceEnum = "required"; ``` diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md b/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md index ce25c95..0a4167a 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md @@ -18,17 +18,19 @@ let value: ChatCompletionStreamRequest = { ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `minTokens` | *number* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *boolean* | :heavy_minus_sign: | N/A | | -| `stop` | *components.Stop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | *components.Messages*[] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `responseFormat` | [components.ResponseFormat](../../models/components/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | [components.Tool](../../models/components/tool.md)[] | :heavy_minus_sign: | N/A | | -| `toolChoice` | *components.ChatCompletionStreamRequestToolChoice* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *boolean* | :heavy_minus_sign: | N/A | | +| `stop` | *components.Stop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | *components.Messages*[] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `responseFormat` | [components.ResponseFormat](../../models/components/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | [components.Tool](../../models/components/tool.md)[] | :heavy_minus_sign: | N/A | | +| `toolChoice` | *components.ChatCompletionStreamRequestToolChoice* | :heavy_minus_sign: | N/A | | +| `presencePenalty` | *number* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *number* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequesttoolchoice.md index d4eb9d8..d61fd6c 100644 --- a/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequesttoolchoice.md +++ b/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequesttoolchoice.md @@ -1,24 +1,21 @@ # ChatCompletionStreamRequestToolChoice -## Example Usage - -```typescript -import { ChatCompletionStreamRequestToolChoice } from "@mistralai/mistralai-gcp/models/components"; - -let value: ChatCompletionStreamRequestToolChoice = "none"; -``` ## Supported Types ### `components.ToolChoice` ```typescript -const value: components.ToolChoice = /* values here */ +const value: components.ToolChoice = { + function: { + name: "", + }, +}; ``` ### `components.ToolChoiceEnum` ```typescript -const value: components.ToolChoiceEnum = /* values here */ +const value: components.ToolChoiceEnum = "auto"; ``` diff --git a/packages/mistralai-gcp/docs/models/components/completionchunk.md b/packages/mistralai-gcp/docs/models/components/completionchunk.md index 1c16b98..feac1c0 100644 --- a/packages/mistralai-gcp/docs/models/components/completionchunk.md +++ b/packages/mistralai-gcp/docs/models/components/completionchunk.md @@ -7,7 +7,7 @@ import { CompletionChunk } from "@mistralai/mistralai-gcp/models/components"; let value: CompletionChunk = { id: "", - model: "Spyder", + model: "Golf", usage: { promptTokens: 16, completionTokens: 34, @@ -15,9 +15,9 @@ let value: CompletionChunk = { }, choices: [ { - index: 602763, + index: 423655, delta: {}, - finishReason: "tool_calls", + finishReason: "error", }, ], }; diff --git a/packages/mistralai-gcp/docs/models/components/completionevent.md b/packages/mistralai-gcp/docs/models/components/completionevent.md index dfafceb..1e30d6e 100644 --- a/packages/mistralai-gcp/docs/models/components/completionevent.md +++ b/packages/mistralai-gcp/docs/models/components/completionevent.md @@ -8,7 +8,7 @@ import { CompletionEvent } from "@mistralai/mistralai-gcp/models/components"; let value: CompletionEvent = { data: { id: "", - model: "Model S", + model: "Golf", usage: { promptTokens: 16, completionTokens: 34, @@ -16,7 +16,7 @@ let value: CompletionEvent = { }, choices: [ { - index: 592845, + index: 715190, delta: {}, finishReason: "error", }, diff --git 
a/packages/mistralai-gcp/docs/models/components/completionresponsestreamchoice.md b/packages/mistralai-gcp/docs/models/components/completionresponsestreamchoice.md index 2f63622..823eecd 100644 --- a/packages/mistralai-gcp/docs/models/components/completionresponsestreamchoice.md +++ b/packages/mistralai-gcp/docs/models/components/completionresponsestreamchoice.md @@ -6,9 +6,9 @@ import { CompletionResponseStreamChoice } from "@mistralai/mistralai-gcp/models/components"; let value: CompletionResponseStreamChoice = { - index: 847252, + index: 891773, delta: {}, - finishReason: "length", + finishReason: "tool_calls", }; ``` diff --git a/packages/mistralai-gcp/docs/models/components/content.md b/packages/mistralai-gcp/docs/models/components/content.md index 576b429..99ff58d 100644 --- a/packages/mistralai-gcp/docs/models/components/content.md +++ b/packages/mistralai-gcp/docs/models/components/content.md @@ -1,28 +1,21 @@ # Content -## Example Usage - -```typescript -import { Content } from "@mistralai/mistralai-gcp/models/components"; - -let value: Content = [ - { - text: "", - }, -]; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` -### `components.TextChunk[]` +### `components.ContentChunk[]` ```typescript -const value: components.TextChunk[] = /* values here */ +const value: components.ContentChunk[] = [ + { + text: "", + }, +]; ``` diff --git a/packages/mistralai-gcp/docs/models/components/contentchunk.md b/packages/mistralai-gcp/docs/models/components/contentchunk.md index a3e8220..0c7fa7d 100644 --- a/packages/mistralai-gcp/docs/models/components/contentchunk.md +++ b/packages/mistralai-gcp/docs/models/components/contentchunk.md @@ -1,20 +1,13 @@ # ContentChunk -## Example Usage - -```typescript -import { ContentChunk } from "@mistralai/mistralai-gcp/models/components"; - -let value: ContentChunk = { - text: "", -}; -``` ## Supported Types ### `components.TextChunk` ```typescript -const value: components.TextChunk = /* values here */ +const value: components.TextChunk = { + text: "", +}; ``` diff --git a/packages/mistralai-gcp/docs/models/components/deltamessage.md b/packages/mistralai-gcp/docs/models/components/deltamessage.md index beae339..3f287bc 100644 --- a/packages/mistralai-gcp/docs/models/components/deltamessage.md +++ b/packages/mistralai-gcp/docs/models/components/deltamessage.md @@ -13,5 +13,5 @@ let value: DeltaMessage = {}; | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | `role` | *string* | :heavy_minus_sign: | N/A | -| `content` | *string* | :heavy_minus_sign: | N/A | +| `content` | *components.Content* | :heavy_minus_sign: | N/A | | `toolCalls` | [components.ToolCall](../../models/components/toolcall.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/fimcompletionrequest.md b/packages/mistralai-gcp/docs/models/components/fimcompletionrequest.md index d388000..bb17891 100644 --- a/packages/mistralai-gcp/docs/models/components/fimcompletionrequest.md +++ b/packages/mistralai-gcp/docs/models/components/fimcompletionrequest.md @@ -14,15 +14,15 @@ let value: FIMCompletionRequest = { ## Fields -| Field | Type | Required | Description | Example | -| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `minTokens` | *number* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | *components.FIMCompletionRequestStop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `prompt` | *string* | :heavy_check_mark: | The text/code to complete. | def | -| `suffix` | *string* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | *components.FIMCompletionRequestStop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *string* | :heavy_check_mark: | The text/code to complete. | def | +| `suffix` | *string* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `minTokens` | *number* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/fimcompletionrequeststop.md b/packages/mistralai-gcp/docs/models/components/fimcompletionrequeststop.md index ef5dd96..84c5828 100644 --- a/packages/mistralai-gcp/docs/models/components/fimcompletionrequeststop.md +++ b/packages/mistralai-gcp/docs/models/components/fimcompletionrequeststop.md @@ -2,25 +2,20 @@ Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array -## Example Usage - -```typescript -import { FIMCompletionRequestStop } from "@mistralai/mistralai-gcp/models/components"; - -let value: FIMCompletionRequestStop = ""; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` ### `string[]` ```typescript -const value: string[] = /* values here */ +const value: string[] = [ + "", +]; ``` diff --git a/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequest.md b/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequest.md index 1b80155..395558a 100644 --- a/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequest.md +++ b/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequest.md @@ -14,15 +14,15 @@ let value: FIMCompletionStreamRequest = { ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `minTokens` | *number* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *boolean* | :heavy_minus_sign: | N/A | | -| `stop` | *components.FIMCompletionStreamRequestStop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `prompt` | *string* | :heavy_check_mark: | The text/code to complete. | def | -| `suffix` | *string* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *boolean* | :heavy_minus_sign: | N/A | | +| `stop` | *components.FIMCompletionStreamRequestStop* | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `randomSeed` | *number* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *string* | :heavy_check_mark: | The text/code to complete. | def | +| `suffix` | *string* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `minTokens` | *number* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequeststop.md b/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequeststop.md index 5b776a3..376ae82 100644 --- a/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequeststop.md +++ b/packages/mistralai-gcp/docs/models/components/fimcompletionstreamrequeststop.md @@ -2,25 +2,20 @@ Stop generation if this token is detected. Or if one of these tokens is detected when providing an array -## Example Usage - -```typescript -import { FIMCompletionStreamRequestStop } from "@mistralai/mistralai-gcp/models/components"; - -let value: FIMCompletionStreamRequestStop = ""; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` ### `string[]` ```typescript -const value: string[] = /* values here */ +const value: string[] = [ + "", +]; ``` diff --git a/packages/mistralai-gcp/docs/models/components/finishreason.md b/packages/mistralai-gcp/docs/models/components/finishreason.md index aa34c23..f284ebe 100644 --- a/packages/mistralai-gcp/docs/models/components/finishreason.md +++ b/packages/mistralai-gcp/docs/models/components/finishreason.md @@ -5,11 +5,13 @@ ```typescript import { FinishReason } from "@mistralai/mistralai-gcp/models/components"; -let value: FinishReason = "error"; +let value: FinishReason = "length"; ``` ## Values +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
+ ```typescript -"stop" | "length" | "error" | "tool_calls" +"stop" | "length" | "error" | "tool_calls" | Unrecognized ``` \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/functioncall.md b/packages/mistralai-gcp/docs/models/components/functioncall.md index 04b3f85..326a14c 100644 --- a/packages/mistralai-gcp/docs/models/components/functioncall.md +++ b/packages/mistralai-gcp/docs/models/components/functioncall.md @@ -7,9 +7,7 @@ import { FunctionCall } from "@mistralai/mistralai-gcp/models/components"; let value: FunctionCall = { name: "", - arguments: { - "key": "", - }, + arguments: "", }; ``` diff --git a/packages/mistralai-gcp/docs/models/components/loc.md b/packages/mistralai-gcp/docs/models/components/loc.md index a87585f..0d8fe4c 100644 --- a/packages/mistralai-gcp/docs/models/components/loc.md +++ b/packages/mistralai-gcp/docs/models/components/loc.md @@ -1,24 +1,17 @@ # Loc -## Example Usage - -```typescript -import { Loc } from "@mistralai/mistralai-gcp/models/components"; - -let value: Loc = ""; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` ### `number` ```typescript -const value: number = /* values here */ +const value: number = 528895; ``` diff --git a/packages/mistralai-gcp/docs/models/components/messages.md b/packages/mistralai-gcp/docs/models/components/messages.md index d899339..020761e 100644 --- a/packages/mistralai-gcp/docs/models/components/messages.md +++ b/packages/mistralai-gcp/docs/models/components/messages.md @@ -1,36 +1,35 @@ # Messages -## Example Usage - -```typescript -import { Messages } from "@mistralai/mistralai-gcp/models/components"; - -let value: Messages = {}; -``` ## Supported Types ### `components.AssistantMessage` ```typescript -const value: components.AssistantMessage = /* values here */ +const value: components.AssistantMessage = {}; ``` ### `components.SystemMessage` ```typescript -const value: components.SystemMessage = /* values here */ +const value: components.SystemMessage = { + content: "", +}; ``` ### `components.ToolMessage` ```typescript -const value: components.ToolMessage = /* values here */ +const value: components.ToolMessage = { + content: "", +}; ``` ### `components.UserMessage` ```typescript -const value: components.UserMessage = /* values here */ +const value: components.UserMessage = { + content: "", +}; ``` diff --git a/packages/mistralai-gcp/docs/models/components/responseformats.md b/packages/mistralai-gcp/docs/models/components/responseformats.md index 27021bb..9f0f102 100644 --- a/packages/mistralai-gcp/docs/models/components/responseformats.md +++ b/packages/mistralai-gcp/docs/models/components/responseformats.md @@ -7,7 +7,7 @@ An object specifying the format that the model must output. Setting to `{ "type" ```typescript import { ResponseFormats } from "@mistralai/mistralai-gcp/models/components"; -let value: ResponseFormats = "text"; +let value: ResponseFormats = "json_object"; ``` ## Values diff --git a/packages/mistralai-gcp/docs/models/components/stop.md b/packages/mistralai-gcp/docs/models/components/stop.md index ee16521..f26b1bf 100644 --- a/packages/mistralai-gcp/docs/models/components/stop.md +++ b/packages/mistralai-gcp/docs/models/components/stop.md @@ -2,27 +2,20 @@ Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array -## Example Usage - -```typescript -import { Stop } from "@mistralai/mistralai-gcp/models/components"; - -let value: Stop = [ - "", -]; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` ### `string[]` ```typescript -const value: string[] = /* values here */ +const value: string[] = [ + "", +]; ``` diff --git a/packages/mistralai-gcp/docs/models/components/systemmessage.md b/packages/mistralai-gcp/docs/models/components/systemmessage.md index 183c00e..116f774 100644 --- a/packages/mistralai-gcp/docs/models/components/systemmessage.md +++ b/packages/mistralai-gcp/docs/models/components/systemmessage.md @@ -18,5 +18,5 @@ let value: SystemMessage = { | Field | Type | Required | Description | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `content` | *components.Content* | :heavy_check_mark: | N/A | +| `content` | *components.SystemMessageContent* | :heavy_check_mark: | N/A | | `role` | [components.Role](../../models/components/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai-gcp/docs/models/components/systemmessagecontent.md b/packages/mistralai-gcp/docs/models/components/systemmessagecontent.md new file mode 100644 index 0000000..cda9bed --- /dev/null +++ b/packages/mistralai-gcp/docs/models/components/systemmessagecontent.md @@ -0,0 +1,21 @@ +# SystemMessageContent + + +## Supported Types + +### `string` + +```typescript +const value: string = ""; +``` + +### `components.TextChunk[]` + +```typescript +const value: components.TextChunk[] = [ + { + text: "", + }, +]; +``` + diff --git a/packages/mistralai-gcp/docs/models/components/toolcall.md b/packages/mistralai-gcp/docs/models/components/toolcall.md index a512f01..ffa4c87 100644 --- a/packages/mistralai-gcp/docs/models/components/toolcall.md +++ b/packages/mistralai-gcp/docs/models/components/toolcall.md @@ -8,7 +8,9 @@ import { ToolCall } from "@mistralai/mistralai-gcp/models/components"; let value: ToolCall = { function: { name: "", - arguments: "", + arguments: { + "key": "", + }, }, }; ``` diff --git a/packages/mistralai-gcp/docs/models/components/toolchoiceenum.md b/packages/mistralai-gcp/docs/models/components/toolchoiceenum.md index 76d0c73..6283aa3 100644 --- a/packages/mistralai-gcp/docs/models/components/toolchoiceenum.md +++ b/packages/mistralai-gcp/docs/models/components/toolchoiceenum.md @@ -5,7 +5,7 @@ ```typescript import { ToolChoiceEnum } from "@mistralai/mistralai-gcp/models/components"; -let value: ToolChoiceEnum = "none"; +let value: ToolChoiceEnum = "required"; ``` ## Values diff --git a/packages/mistralai-gcp/docs/models/components/usermessage.md b/packages/mistralai-gcp/docs/models/components/usermessage.md index e64d8ce..d451018 100644 --- a/packages/mistralai-gcp/docs/models/components/usermessage.md +++ b/packages/mistralai-gcp/docs/models/components/usermessage.md @@ -6,7 +6,11 @@ import { UserMessage } from "@mistralai/mistralai-gcp/models/components"; let value: UserMessage = { - content: "", + content: [ + { + text: "", + }, + ], }; ``` diff --git a/packages/mistralai-gcp/docs/models/components/usermessagecontent.md b/packages/mistralai-gcp/docs/models/components/usermessagecontent.md index 661cd32..f949165 100644 --- 
a/packages/mistralai-gcp/docs/models/components/usermessagecontent.md +++ b/packages/mistralai-gcp/docs/models/components/usermessagecontent.md @@ -1,28 +1,21 @@ # UserMessageContent -## Example Usage - -```typescript -import { UserMessageContent } from "@mistralai/mistralai-gcp/models/components"; - -let value: UserMessageContent = [ - { - text: "", - }, -]; -``` ## Supported Types ### `string` ```typescript -const value: string = /* values here */ +const value: string = ""; ``` ### `components.ContentChunk[]` ```typescript -const value: components.ContentChunk[] = /* values here */ +const value: components.ContentChunk[] = [ + { + text: "", + }, +]; ``` diff --git a/packages/mistralai-gcp/docs/models/components/validationerror.md b/packages/mistralai-gcp/docs/models/components/validationerror.md index 704c8e3..a052e97 100644 --- a/packages/mistralai-gcp/docs/models/components/validationerror.md +++ b/packages/mistralai-gcp/docs/models/components/validationerror.md @@ -7,7 +7,7 @@ import { ValidationError } from "@mistralai/mistralai-gcp/models/components"; let value: ValidationError = { loc: [ - "", + 925597, ], msg: "", type: "", diff --git a/packages/mistralai-gcp/docs/models/errors/httpvalidationerror.md b/packages/mistralai-gcp/docs/models/errors/httpvalidationerror.md index 50f1ce9..bc5d014 100644 --- a/packages/mistralai-gcp/docs/models/errors/httpvalidationerror.md +++ b/packages/mistralai-gcp/docs/models/errors/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Example Usage ```typescript diff --git a/packages/mistralai-gcp/jsr.json b/packages/mistralai-gcp/jsr.json index 9677881..fb73caf 100644 --- a/packages/mistralai-gcp/jsr.json +++ b/packages/mistralai-gcp/jsr.json @@ -2,7 +2,7 @@ { "name": "@mistralai/mistralai-gcp", - "version": "1.1.0", + "version": "1.3.0", "exports": { ".": "./src/index.ts", "./models/errors": "./src/models/errors/index.ts", diff --git a/packages/mistralai-gcp/package-lock.json b/packages/mistralai-gcp/package-lock.json index 1f2b867..c983f61 100644 --- a/packages/mistralai-gcp/package-lock.json +++ b/packages/mistralai-gcp/package-lock.json @@ -1,12 +1,12 @@ { "name": "@mistralai/mistralai-gcp", - "version": "1.1.0", + "version": "1.3.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@mistralai/mistralai-gcp", - "version": "1.1.0", + "version": "1.3.0", "dependencies": { "google-auth-library": "^9.11.0" }, diff --git a/packages/mistralai-gcp/package.json b/packages/mistralai-gcp/package.json index 1f7e3ea..846e4b4 100644 --- a/packages/mistralai-gcp/package.json +++ b/packages/mistralai-gcp/package.json @@ -1,6 +1,6 @@ { "name": "@mistralai/mistralai-gcp", - "version": "1.1.0", + "version": "1.3.0", "author": "Speakeasy", "main": "./index.js", "sideEffects": false, diff --git a/packages/mistralai-gcp/src/funcs/chatComplete.ts b/packages/mistralai-gcp/src/funcs/chatComplete.ts index 375cb01..14fbc4c 100644 --- a/packages/mistralai-gcp/src/funcs/chatComplete.ts +++ b/packages/mistralai-gcp/src/funcs/chatComplete.ts @@ -3,9 +3,9 @@ */ import { MistralGoogleCloudCore } from "../core.js"; -import { encodeJSON as encodeJSON$ } from "../lib/encodings.js"; -import * as m$ from "../lib/matchers.js"; -import * as schemas$ from "../lib/schemas.js"; +import { encodeJSON } from "../lib/encodings.js"; +import * as M from "../lib/matchers.js"; +import { safeParse } from "../lib/schemas.js"; import { RequestOptions } from "../lib/sdks.js"; import { extractSecurity, resolveGlobalSecurity } from 
"../lib/security.js"; import { pathToFunc } from "../lib/url.js"; @@ -26,7 +26,7 @@ import { Result } from "../types/fp.js"; * Chat Completion */ export async function chatComplete( - client$: MistralGoogleCloudCore, + client: MistralGoogleCloudCore, request: components.ChatCompletionRequest, options?: RequestOptions, ): Promise< @@ -42,65 +42,67 @@ export async function chatComplete( | ConnectionError > > { - const input$ = request; - - const parsed$ = schemas$.safeParse( - input$, - (value$) => components.ChatCompletionRequest$outboundSchema.parse(value$), + const parsed = safeParse( + request, + (value) => components.ChatCompletionRequest$outboundSchema.parse(value), "Input validation failed", ); - if (!parsed$.ok) { - return parsed$; + if (!parsed.ok) { + return parsed; } - const payload$ = parsed$.value; - const body$ = encodeJSON$("body", payload$, { explode: true }); + const payload = parsed.value; + const body = encodeJSON("body", payload, { explode: true }); - const path$ = pathToFunc("/rawPredict")(); + const path = pathToFunc("/rawPredict")(); - const headers$ = new Headers({ + const headers = new Headers({ "Content-Type": "application/json", Accept: "application/json", }); - const apiKey$ = await extractSecurity(client$.options$.apiKey); - const security$ = apiKey$ == null ? {} : { apiKey: apiKey$ }; + const secConfig = await extractSecurity(client._options.apiKey); + const securityInput = secConfig == null ? {} : { apiKey: secConfig }; + const requestSecurity = resolveGlobalSecurity(securityInput); + const context = { operationID: "chat_completion_v1_chat_completions_post", oAuth2Scopes: [], - securitySource: client$.options$.apiKey, + securitySource: client._options.apiKey, + retryConfig: options?.retries + || client._options.retryConfig + || { strategy: "none" }, + retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], }; - const securitySettings$ = resolveGlobalSecurity(security$); - const requestRes = client$.createRequest$(context, { - security: securitySettings$, + const requestRes = client._createRequest(context, { + security: requestSecurity, method: "POST", - path: path$, - headers: headers$, - body: body$, - timeoutMs: options?.timeoutMs || client$.options$.timeoutMs || -1, + path: path, + headers: headers, + body: body, + timeoutMs: options?.timeoutMs || client._options.timeoutMs || -1, }, options); if (!requestRes.ok) { return requestRes; } - const request$ = requestRes.value; + const req = requestRes.value; - const doResult = await client$.do$(request$, { + const doResult = await client._do(req, { context, errorCodes: ["422", "4XX", "5XX"], - retryConfig: options?.retries - || client$.options$.retryConfig, - retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], + retryConfig: context.retryConfig, + retryCodes: context.retryCodes, }); if (!doResult.ok) { return doResult; } const response = doResult.value; - const responseFields$ = { - HttpMeta: { Response: response, Request: request$ }, + const responseFields = { + HttpMeta: { Response: response, Request: req }, }; - const [result$] = await m$.match< + const [result] = await M.match< components.ChatCompletionResponse, | errors.HTTPValidationError | SDKError @@ -111,13 +113,13 @@ export async function chatComplete( | RequestTimeoutError | ConnectionError >( - m$.json(200, components.ChatCompletionResponse$inboundSchema), - m$.jsonErr(422, errors.HTTPValidationError$inboundSchema), - m$.fail(["4XX", "5XX"]), - )(response, { extraFields: responseFields$ }); - if (!result$.ok) { - 
return result$; + M.json(200, components.ChatCompletionResponse$inboundSchema), + M.jsonErr(422, errors.HTTPValidationError$inboundSchema), + M.fail(["4XX", "5XX"]), + )(response, { extraFields: responseFields }); + if (!result.ok) { + return result; } - return result$; + return result; } diff --git a/packages/mistralai-gcp/src/funcs/chatStream.ts b/packages/mistralai-gcp/src/funcs/chatStream.ts index 1183a39..9bed6da 100644 --- a/packages/mistralai-gcp/src/funcs/chatStream.ts +++ b/packages/mistralai-gcp/src/funcs/chatStream.ts @@ -4,10 +4,10 @@ import * as z from "zod"; import { MistralGoogleCloudCore } from "../core.js"; -import { encodeJSON as encodeJSON$ } from "../lib/encodings.js"; +import { encodeJSON } from "../lib/encodings.js"; import { EventStream } from "../lib/event-streams.js"; -import * as m$ from "../lib/matchers.js"; -import * as schemas$ from "../lib/schemas.js"; +import * as M from "../lib/matchers.js"; +import { safeParse } from "../lib/schemas.js"; import { RequestOptions } from "../lib/sdks.js"; import { extractSecurity, resolveGlobalSecurity } from "../lib/security.js"; import { pathToFunc } from "../lib/url.js"; @@ -31,7 +31,7 @@ import { Result } from "../types/fp.js"; * Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. */ export async function chatStream( - client$: MistralGoogleCloudCore, + client: MistralGoogleCloudCore, request: components.ChatCompletionStreamRequest, options?: RequestOptions, ): Promise< @@ -47,66 +47,68 @@ export async function chatStream( | ConnectionError > > { - const input$ = request; - - const parsed$ = schemas$.safeParse( - input$, - (value$) => - components.ChatCompletionStreamRequest$outboundSchema.parse(value$), + const parsed = safeParse( + request, + (value) => + components.ChatCompletionStreamRequest$outboundSchema.parse(value), "Input validation failed", ); - if (!parsed$.ok) { - return parsed$; + if (!parsed.ok) { + return parsed; } - const payload$ = parsed$.value; - const body$ = encodeJSON$("body", payload$, { explode: true }); + const payload = parsed.value; + const body = encodeJSON("body", payload, { explode: true }); - const path$ = pathToFunc("/streamRawPredict")(); + const path = pathToFunc("/streamRawPredict")(); - const headers$ = new Headers({ + const headers = new Headers({ "Content-Type": "application/json", Accept: "text/event-stream", }); - const apiKey$ = await extractSecurity(client$.options$.apiKey); - const security$ = apiKey$ == null ? {} : { apiKey: apiKey$ }; + const secConfig = await extractSecurity(client._options.apiKey); + const securityInput = secConfig == null ? 
{} : { apiKey: secConfig }; + const requestSecurity = resolveGlobalSecurity(securityInput); + const context = { operationID: "stream_chat", oAuth2Scopes: [], - securitySource: client$.options$.apiKey, + securitySource: client._options.apiKey, + retryConfig: options?.retries + || client._options.retryConfig + || { strategy: "none" }, + retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], }; - const securitySettings$ = resolveGlobalSecurity(security$); - const requestRes = client$.createRequest$(context, { - security: securitySettings$, + const requestRes = client._createRequest(context, { + security: requestSecurity, method: "POST", - path: path$, - headers: headers$, - body: body$, - timeoutMs: options?.timeoutMs || client$.options$.timeoutMs || -1, + path: path, + headers: headers, + body: body, + timeoutMs: options?.timeoutMs || client._options.timeoutMs || -1, }, options); if (!requestRes.ok) { return requestRes; } - const request$ = requestRes.value; + const req = requestRes.value; - const doResult = await client$.do$(request$, { + const doResult = await client._do(req, { context, errorCodes: ["422", "4XX", "5XX"], - retryConfig: options?.retries - || client$.options$.retryConfig, - retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], + retryConfig: context.retryConfig, + retryCodes: context.retryCodes, }); if (!doResult.ok) { return doResult; } const response = doResult.value; - const responseFields$ = { - HttpMeta: { Response: response, Request: request$ }, + const responseFields = { + HttpMeta: { Response: response, Request: req }, }; - const [result$] = await m$.match< + const [result] = await M.match< EventStream, | errors.HTTPValidationError | SDKError @@ -117,7 +119,7 @@ export async function chatStream( | RequestTimeoutError | ConnectionError >( - m$.sse( + M.sse( 200, z.instanceof(ReadableStream).transform(stream => { return new EventStream({ @@ -130,12 +132,12 @@ export async function chatStream( }), { sseSentinel: "[DONE]" }, ), - m$.jsonErr(422, errors.HTTPValidationError$inboundSchema), - m$.fail(["4XX", "5XX"]), - )(response, { extraFields: responseFields$ }); - if (!result$.ok) { - return result$; + M.jsonErr(422, errors.HTTPValidationError$inboundSchema), + M.fail(["4XX", "5XX"]), + )(response, { extraFields: responseFields }); + if (!result.ok) { + return result; } - return result$; + return result; } diff --git a/packages/mistralai-gcp/src/funcs/fimComplete.ts b/packages/mistralai-gcp/src/funcs/fimComplete.ts index 1989872..c6b0b01 100644 --- a/packages/mistralai-gcp/src/funcs/fimComplete.ts +++ b/packages/mistralai-gcp/src/funcs/fimComplete.ts @@ -3,9 +3,9 @@ */ import { MistralGoogleCloudCore } from "../core.js"; -import { encodeJSON as encodeJSON$ } from "../lib/encodings.js"; -import * as m$ from "../lib/matchers.js"; -import * as schemas$ from "../lib/schemas.js"; +import { encodeJSON } from "../lib/encodings.js"; +import * as M from "../lib/matchers.js"; +import { safeParse } from "../lib/schemas.js"; import { RequestOptions } from "../lib/sdks.js"; import { extractSecurity, resolveGlobalSecurity } from "../lib/security.js"; import { pathToFunc } from "../lib/url.js"; @@ -29,7 +29,7 @@ import { Result } from "../types/fp.js"; * FIM completion. 
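For orientation, here is a hedged sketch of calling the standalone `fimComplete` function with a bare `MistralGoogleCloudCore` client. The subpath import paths and the `apiKey` option are assumptions inferred from the file layout and the security handling in this diff, not confirmed public exports; the model id and code snippets are placeholders.

```typescript
// Assumed subpath imports, mirroring src/core.ts and src/funcs/fimComplete.ts.
import { MistralGoogleCloudCore } from "@mistralai/mistralai-gcp/core.js";
import { fimComplete } from "@mistralai/mistralai-gcp/funcs/fimComplete.js";

// `apiKey` is the security option this diff reads via `client._options.apiKey`;
// real deployments may instead supply Google credentials.
const client = new MistralGoogleCloudCore({
  apiKey: process.env["MISTRAL_GCP_TOKEN"] ?? "",
});

async function run() {
  const res = await fimComplete(client, {
    model: "codestral-2405", // placeholder model id
    prompt: "def add(a, b):",
    suffix: "    return a + b",
  });
  if (!res.ok) {
    // Standalone functions return a Result union instead of throwing.
    throw res.error;
  }
  console.log(res.value);
}

run();
```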
*/ export async function fimComplete( - client$: MistralGoogleCloudCore, + client: MistralGoogleCloudCore, request: components.FIMCompletionRequest, options?: RequestOptions, ): Promise< @@ -45,65 +45,67 @@ export async function fimComplete( | ConnectionError > > { - const input$ = request; - - const parsed$ = schemas$.safeParse( - input$, - (value$) => components.FIMCompletionRequest$outboundSchema.parse(value$), + const parsed = safeParse( + request, + (value) => components.FIMCompletionRequest$outboundSchema.parse(value), "Input validation failed", ); - if (!parsed$.ok) { - return parsed$; + if (!parsed.ok) { + return parsed; } - const payload$ = parsed$.value; - const body$ = encodeJSON$("body", payload$, { explode: true }); + const payload = parsed.value; + const body = encodeJSON("body", payload, { explode: true }); - const path$ = pathToFunc("/rawPredict#fim")(); + const path = pathToFunc("/rawPredict#fim")(); - const headers$ = new Headers({ + const headers = new Headers({ "Content-Type": "application/json", Accept: "application/json", }); - const apiKey$ = await extractSecurity(client$.options$.apiKey); - const security$ = apiKey$ == null ? {} : { apiKey: apiKey$ }; + const secConfig = await extractSecurity(client._options.apiKey); + const securityInput = secConfig == null ? {} : { apiKey: secConfig }; + const requestSecurity = resolveGlobalSecurity(securityInput); + const context = { operationID: "fim_completion_v1_fim_completions_post", oAuth2Scopes: [], - securitySource: client$.options$.apiKey, + securitySource: client._options.apiKey, + retryConfig: options?.retries + || client._options.retryConfig + || { strategy: "none" }, + retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], }; - const securitySettings$ = resolveGlobalSecurity(security$); - const requestRes = client$.createRequest$(context, { - security: securitySettings$, + const requestRes = client._createRequest(context, { + security: requestSecurity, method: "POST", - path: path$, - headers: headers$, - body: body$, - timeoutMs: options?.timeoutMs || client$.options$.timeoutMs || -1, + path: path, + headers: headers, + body: body, + timeoutMs: options?.timeoutMs || client._options.timeoutMs || -1, }, options); if (!requestRes.ok) { return requestRes; } - const request$ = requestRes.value; + const req = requestRes.value; - const doResult = await client$.do$(request$, { + const doResult = await client._do(req, { context, errorCodes: ["422", "4XX", "5XX"], - retryConfig: options?.retries - || client$.options$.retryConfig, - retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], + retryConfig: context.retryConfig, + retryCodes: context.retryCodes, }); if (!doResult.ok) { return doResult; } const response = doResult.value; - const responseFields$ = { - HttpMeta: { Response: response, Request: request$ }, + const responseFields = { + HttpMeta: { Response: response, Request: req }, }; - const [result$] = await m$.match< + const [result] = await M.match< components.FIMCompletionResponse, | errors.HTTPValidationError | SDKError @@ -114,13 +116,13 @@ export async function fimComplete( | RequestTimeoutError | ConnectionError >( - m$.json(200, components.FIMCompletionResponse$inboundSchema), - m$.jsonErr(422, errors.HTTPValidationError$inboundSchema), - m$.fail(["4XX", "5XX"]), - )(response, { extraFields: responseFields$ }); - if (!result$.ok) { - return result$; + M.json(200, components.FIMCompletionResponse$inboundSchema), + M.jsonErr(422, errors.HTTPValidationError$inboundSchema), + 
M.fail(["4XX", "5XX"]), + )(response, { extraFields: responseFields }); + if (!result.ok) { + return result; } - return result$; + return result; } diff --git a/packages/mistralai-gcp/src/funcs/fimStream.ts b/packages/mistralai-gcp/src/funcs/fimStream.ts index 147532a..3de7685 100644 --- a/packages/mistralai-gcp/src/funcs/fimStream.ts +++ b/packages/mistralai-gcp/src/funcs/fimStream.ts @@ -4,10 +4,10 @@ import * as z from "zod"; import { MistralGoogleCloudCore } from "../core.js"; -import { encodeJSON as encodeJSON$ } from "../lib/encodings.js"; +import { encodeJSON } from "../lib/encodings.js"; import { EventStream } from "../lib/event-streams.js"; -import * as m$ from "../lib/matchers.js"; -import * as schemas$ from "../lib/schemas.js"; +import * as M from "../lib/matchers.js"; +import { safeParse } from "../lib/schemas.js"; import { RequestOptions } from "../lib/sdks.js"; import { extractSecurity, resolveGlobalSecurity } from "../lib/security.js"; import { pathToFunc } from "../lib/url.js"; @@ -31,7 +31,7 @@ import { Result } from "../types/fp.js"; * Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. */ export async function fimStream( - client$: MistralGoogleCloudCore, + client: MistralGoogleCloudCore, request: components.FIMCompletionStreamRequest, options?: RequestOptions, ): Promise< @@ -47,66 +47,68 @@ export async function fimStream( | ConnectionError > > { - const input$ = request; - - const parsed$ = schemas$.safeParse( - input$, - (value$) => - components.FIMCompletionStreamRequest$outboundSchema.parse(value$), + const parsed = safeParse( + request, + (value) => + components.FIMCompletionStreamRequest$outboundSchema.parse(value), "Input validation failed", ); - if (!parsed$.ok) { - return parsed$; + if (!parsed.ok) { + return parsed; } - const payload$ = parsed$.value; - const body$ = encodeJSON$("body", payload$, { explode: true }); + const payload = parsed.value; + const body = encodeJSON("body", payload, { explode: true }); - const path$ = pathToFunc("/streamRawPredict#fim")(); + const path = pathToFunc("/streamRawPredict#fim")(); - const headers$ = new Headers({ + const headers = new Headers({ "Content-Type": "application/json", Accept: "text/event-stream", }); - const apiKey$ = await extractSecurity(client$.options$.apiKey); - const security$ = apiKey$ == null ? {} : { apiKey: apiKey$ }; + const secConfig = await extractSecurity(client._options.apiKey); + const securityInput = secConfig == null ? 
{} : { apiKey: secConfig }; + const requestSecurity = resolveGlobalSecurity(securityInput); + const context = { operationID: "stream_fim", oAuth2Scopes: [], - securitySource: client$.options$.apiKey, + securitySource: client._options.apiKey, + retryConfig: options?.retries + || client._options.retryConfig + || { strategy: "none" }, + retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], }; - const securitySettings$ = resolveGlobalSecurity(security$); - const requestRes = client$.createRequest$(context, { - security: securitySettings$, + const requestRes = client._createRequest(context, { + security: requestSecurity, method: "POST", - path: path$, - headers: headers$, - body: body$, - timeoutMs: options?.timeoutMs || client$.options$.timeoutMs || -1, + path: path, + headers: headers, + body: body, + timeoutMs: options?.timeoutMs || client._options.timeoutMs || -1, }, options); if (!requestRes.ok) { return requestRes; } - const request$ = requestRes.value; + const req = requestRes.value; - const doResult = await client$.do$(request$, { + const doResult = await client._do(req, { context, errorCodes: ["422", "4XX", "5XX"], - retryConfig: options?.retries - || client$.options$.retryConfig, - retryCodes: options?.retryCodes || ["429", "500", "502", "503", "504"], + retryConfig: context.retryConfig, + retryCodes: context.retryCodes, }); if (!doResult.ok) { return doResult; } const response = doResult.value; - const responseFields$ = { - HttpMeta: { Response: response, Request: request$ }, + const responseFields = { + HttpMeta: { Response: response, Request: req }, }; - const [result$] = await m$.match< + const [result] = await M.match< EventStream, | errors.HTTPValidationError | SDKError @@ -117,7 +119,7 @@ export async function fimStream( | RequestTimeoutError | ConnectionError >( - m$.sse( + M.sse( 200, z.instanceof(ReadableStream).transform(stream => { return new EventStream({ @@ -130,12 +132,12 @@ export async function fimStream( }), { sseSentinel: "[DONE]" }, ), - m$.jsonErr(422, errors.HTTPValidationError$inboundSchema), - m$.fail(["4XX", "5XX"]), - )(response, { extraFields: responseFields$ }); - if (!result$.ok) { - return result$; + M.jsonErr(422, errors.HTTPValidationError$inboundSchema), + M.fail(["4XX", "5XX"]), + )(response, { extraFields: responseFields }); + if (!result.ok) { + return result; } - return result$; + return result; } diff --git a/packages/mistralai-gcp/src/hooks/types.ts b/packages/mistralai-gcp/src/hooks/types.ts index 656b9e3..4079cc9 100644 --- a/packages/mistralai-gcp/src/hooks/types.ts +++ b/packages/mistralai-gcp/src/hooks/types.ts @@ -3,11 +3,13 @@ */ import { HTTPClient, RequestInput } from "../lib/http.js"; +import { RetryConfig } from "../lib/retries.js"; export type HookContext = { operationID: string; oAuth2Scopes?: string[]; securitySource?: any | (() => Promise); + retryConfig: RetryConfig; }; export type Awaitable = T | Promise; diff --git a/packages/mistralai-gcp/src/lib/config.ts b/packages/mistralai-gcp/src/lib/config.ts index b1bfb06..488cd6a 100644 --- a/packages/mistralai-gcp/src/lib/config.ts +++ b/packages/mistralai-gcp/src/lib/config.ts @@ -8,14 +8,14 @@ import { RetryConfig } from "./retries.js"; import { Params, pathToFunc } from "./url.js"; /** - * Production server + * EU Production server */ -export const ServerProd = "prod"; +export const ServerEu = "eu"; /** * Contains the list of servers available to the SDK */ export const ServerList = { - [ServerProd]: "https://api.mistral.ai", + [ServerEu]: "https://api.mistral.ai", 
} as const; export type SDKOptions = { @@ -44,7 +44,7 @@ export function serverURLFromOptions(options: SDKOptions): URL | null { const params: Params = {}; if (!serverURL) { - const server = options.server ?? ServerProd; + const server = options.server ?? ServerEu; serverURL = ServerList[server] || ""; } @@ -55,8 +55,8 @@ export function serverURLFromOptions(options: SDKOptions): URL | null { export const SDK_METADATA = { language: "typescript", openapiDocVersion: "0.0.2", - sdkVersion: "1.1.0", - genVersion: "2.420.2", + sdkVersion: "1.3.0", + genVersion: "2.452.0", userAgent: - "speakeasy-sdk/typescript 1.1.0 2.420.2 0.0.2 @mistralai/mistralai-gcp", + "speakeasy-sdk/typescript 1.3.0 2.452.0 0.0.2 @mistralai/mistralai-gcp", } as const; diff --git a/packages/mistralai-gcp/src/lib/event-streams.ts b/packages/mistralai-gcp/src/lib/event-streams.ts index cdad7f4..2ab114f 100644 --- a/packages/mistralai-gcp/src/lib/event-streams.ts +++ b/packages/mistralai-gcp/src/lib/event-streams.ts @@ -72,6 +72,12 @@ export class EventStream> { yield event; } } + } catch (e: unknown) { + if (e instanceof Error && e.name === "AbortError") { + return; + } + + throw e; } finally { reader.releaseLock(); } diff --git a/packages/mistralai-gcp/src/lib/retries.ts b/packages/mistralai-gcp/src/lib/retries.ts index df3e0bc..93ebc8d 100644 --- a/packages/mistralai-gcp/src/lib/retries.ts +++ b/packages/mistralai-gcp/src/lib/retries.ts @@ -26,23 +26,44 @@ export type RetryConfig = retryConnectionErrors?: boolean; }; -class PermanentError extends Error { - inner: unknown; +/** + * PermanentError is an error that is not recoverable. Throwing this error will + * cause a retry loop to terminate. + */ +export class PermanentError extends Error { + /** The underlying cause of the error. */ + override readonly cause: unknown; + + constructor(message: string, options?: { cause?: unknown }) { + let msg = message; + if (options?.cause) { + msg += `: ${options.cause}`; + } - constructor(inner: unknown) { - super("Permanent error"); - this.inner = inner; + super(msg, options); + this.name = "PermanentError"; + // In older runtimes, the cause field would not have been assigned through + // the super() call. + if (typeof this.cause === "undefined") { + this.cause = options?.cause; + } Object.setPrototypeOf(this, PermanentError.prototype); } } -class TemporaryError extends Error { - res: Response; +/** + * TemporaryError is an error is used to signal that an HTTP request can be + * retried as part of a retry loop. If retry attempts are exhausted and this + * error is thrown, the response will be returned to the caller. 
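Since every operation now resolves its retry policy up front (per-call `retries`, then the client-level `retryConfig`, then `{ strategy: "none" }`), a hedged sketch of a per-call policy may help. Field names beyond those visible in this diff (for example `maxInterval`, and the `"backoff"` strategy literal implied by `retryBackoff`) are assumptions, and the numbers are placeholders rather than recommendations.

```typescript
// Illustrative retry policy of the shape passed through RequestOptions.retries.
const retries = {
  strategy: "backoff",
  backoff: {
    initialInterval: 500, // ms before the first retry
    maxInterval: 10_000, // assumed field: cap on any single wait
    exponent: 1.5, // multiplier applied per attempt
    maxElapsedTime: 60_000, // ms budget across all attempts
  },
  retryConnectionErrors: true,
} as const;

// Used as, e.g., `await chatComplete(client, request, { retries })`; if omitted,
// the SDK falls back to the client-level retryConfig and finally to { strategy: "none" }.
```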
+ */ +export class TemporaryError extends Error { + response: Response; - constructor(res: Response) { - super("Temporary error"); - this.res = res; + constructor(message: string, response: Response) { + super(message); + this.response = response; + this.name = "TemporaryError"; Object.setPrototypeOf(this, TemporaryError.prototype); } @@ -80,11 +101,14 @@ function wrapFetcher( try { const res = await fn(); if (isRetryableResponse(res, options.statusCodes)) { - throw new TemporaryError(res); + throw new TemporaryError( + "Response failed with retryable status code", + res, + ); } return res; - } catch (err) { + } catch (err: unknown) { if (err instanceof TemporaryError) { throw err; } @@ -96,7 +120,7 @@ function wrapFetcher( throw err; } - throw new PermanentError(err); + throw new PermanentError("Permanent error", { cause: err }); } }; } @@ -139,37 +163,25 @@ async function retryBackoff( try { const res = await fn(); return res; - } catch (err) { + } catch (err: unknown) { if (err instanceof PermanentError) { - throw err.inner; + throw err.cause; } const elapsed = Date.now() - start; if (elapsed > maxElapsedTime) { if (err instanceof TemporaryError) { - return err.res; + return err.response; } throw err; } let retryInterval = 0; - if (err instanceof TemporaryError && err.res && err.res.headers) { - const retryVal = err.res.headers.get("retry-after") || ""; - if (retryVal != "") { - const parsedNumber = Number(retryVal); - if (!isNaN(parsedNumber) && Number.isInteger(parsedNumber)) { - retryInterval = parsedNumber * 1000; - } else { - const parsedDate = Date.parse(retryVal); - if (!isNaN(parsedDate)) { - const deltaMS = parsedDate - Date.now(); - retryInterval = deltaMS > 0 ? Math.ceil(deltaMS) : 0; - } - } - } + if (err instanceof TemporaryError) { + retryInterval = retryIntervalFromResponse(err.response); } - if (retryInterval == 0) { + if (retryInterval <= 0) { retryInterval = initialInterval * Math.pow(x, exponent) + Math.random() * 1000; } @@ -182,6 +194,26 @@ async function retryBackoff( } } +function retryIntervalFromResponse(res: Response): number { + const retryVal = res.headers.get("retry-after") || ""; + if (!retryVal) { + return 0; + } + + const parsedNumber = Number(retryVal); + if (Number.isInteger(parsedNumber)) { + return parsedNumber * 1000; + } + + const parsedDate = Date.parse(retryVal); + if (Number.isInteger(parsedDate)) { + const deltaMS = parsedDate - Date.now(); + return deltaMS > 0 ? 
Math.ceil(deltaMS) : 0; + } + + return 0; +} + async function delay(delay: number): Promise { return new Promise((resolve) => setTimeout(resolve, delay)); } diff --git a/packages/mistralai-gcp/src/lib/sdks.ts b/packages/mistralai-gcp/src/lib/sdks.ts index 5e2a890..2ce720a 100644 --- a/packages/mistralai-gcp/src/lib/sdks.ts +++ b/packages/mistralai-gcp/src/lib/sdks.ts @@ -71,11 +71,11 @@ const isBrowserLike = webWorkerLike || (typeof window === "object" && typeof window.document !== "undefined"); export class ClientSDK { - private readonly httpClient: HTTPClient; - protected readonly baseURL: URL | null; - protected readonly hooks$: SDKHooks; - protected readonly logger?: Logger | undefined; - public readonly options$: SDKOptions & { hooks?: SDKHooks }; + readonly #httpClient: HTTPClient; + readonly #hooks: SDKHooks; + readonly #logger?: Logger | undefined; + protected readonly _baseURL: URL | null; + public readonly _options: SDKOptions & { hooks?: SDKHooks }; constructor(options: SDKOptions = {}) { const opt = options as unknown; @@ -85,33 +85,33 @@ export class ClientSDK { && "hooks" in opt && opt.hooks instanceof SDKHooks ) { - this.hooks$ = opt.hooks; + this.#hooks = opt.hooks; } else { - this.hooks$ = new SDKHooks(); + this.#hooks = new SDKHooks(); } - this.options$ = { ...options, hooks: this.hooks$ }; + this._options = { ...options, hooks: this.#hooks }; const url = serverURLFromOptions(options); if (url) { url.pathname = url.pathname.replace(/\/+$/, "") + "/"; } - const { baseURL, client } = this.hooks$.sdkInit({ + const { baseURL, client } = this.#hooks.sdkInit({ baseURL: url, client: options.httpClient || new HTTPClient(), }); - this.baseURL = baseURL; - this.httpClient = client; - this.logger = options.debugLogger; + this._baseURL = baseURL; + this.#httpClient = client; + this.#logger = options.debugLogger; } - public createRequest$( + public _createRequest( context: HookContext, conf: RequestConfig, options?: RequestOptions, ): Result { const { method, path, query, headers: opHeaders, security } = conf; - const base = conf.baseURL ?? this.baseURL; + const base = conf.baseURL ?? 
this._baseURL; if (!base) { return ERR(new InvalidRequestError("No base URL provided for operation")); } @@ -195,7 +195,7 @@ export class ClientSDK { let input; try { - input = this.hooks$.beforeCreateRequest(context, { + input = this.#hooks.beforeCreateRequest(context, { url: reqURL, options: { ...fetchOptions, @@ -215,13 +215,13 @@ export class ClientSDK { return OK(new Request(input.url, input.options)); } - public async do$( + public async _do( request: Request, options: { context: HookContext; errorCodes: number | string | (number | string)[]; - retryConfig?: RetryConfig | undefined; - retryCodes?: string[] | undefined; + retryConfig: RetryConfig; + retryCodes: string[]; }, ): Promise< Result< @@ -233,34 +233,38 @@ export class ClientSDK { > > { const { context, errorCodes } = options; - const retryConfig = options.retryConfig || { strategy: "none" }; - const retryCodes = options.retryCodes || []; return retry( async () => { - const req = await this.hooks$.beforeRequest(context, request.clone()); - await logRequest(this.logger, req).catch((e) => - this.logger?.log("Failed to log request:", e) + const req = await this.#hooks.beforeRequest(context, request.clone()); + await logRequest(this.#logger, req).catch((e) => + this.#logger?.log("Failed to log request:", e) ); - let response = await this.httpClient.request(req); + let response = await this.#httpClient.request(req); - if (matchStatusCode(response, errorCodes)) { - const result = await this.hooks$.afterError(context, response, null); - if (result.error) { - throw result.error; + try { + if (matchStatusCode(response, errorCodes)) { + const result = await this.#hooks.afterError( + context, + response, + null, + ); + if (result.error) { + throw result.error; + } + response = result.response || response; + } else { + response = await this.#hooks.afterSuccess(context, response); } - response = result.response || response; - } else { - response = await this.hooks$.afterSuccess(context, response); + } finally { + await logResponse(this.#logger, response, req) + .catch(e => this.#logger?.log("Failed to log response:", e)); } - await logResponse(this.logger, response, req) - .catch(e => this.logger?.log("Failed to log response:", e)); - return response; }, - { config: retryConfig, statusCodes: retryCodes }, + { config: options.retryConfig, statusCodes: options.retryCodes }, ).then( (r) => OK(r), (err) => { diff --git a/packages/mistralai-gcp/src/models/components/assistantmessage.ts b/packages/mistralai-gcp/src/models/components/assistantmessage.ts index 4eb8576..c0b7260 100644 --- a/packages/mistralai-gcp/src/models/components/assistantmessage.ts +++ b/packages/mistralai-gcp/src/models/components/assistantmessage.ts @@ -5,6 +5,12 @@ import * as z from "zod"; import { remap as remap$ } from "../../lib/primitives.js"; import { ClosedEnum } from "../../types/enums.js"; +import { + ContentChunk, + ContentChunk$inboundSchema, + ContentChunk$Outbound, + ContentChunk$outboundSchema, +} from "./contentchunk.js"; import { ToolCall, ToolCall$inboundSchema, @@ -12,21 +18,52 @@ import { ToolCall$outboundSchema, } from "./toolcall.js"; +export type AssistantMessageContent = string | Array; + export const AssistantMessageRole = { Assistant: "assistant", } as const; export type AssistantMessageRole = ClosedEnum; export type AssistantMessage = { - content?: string | null | undefined; + content?: string | Array | null | undefined; toolCalls?: Array | null | undefined; - /** - * Set this to `true` when adding an assistant message as prefix to condition the 
model response. The role of the prefix message is to force the model to start its answer by the content of the message. - */ prefix?: boolean | undefined; role?: AssistantMessageRole | undefined; }; +/** @internal */ +export const AssistantMessageContent$inboundSchema: z.ZodType< + AssistantMessageContent, + z.ZodTypeDef, + unknown +> = z.union([z.string(), z.array(ContentChunk$inboundSchema)]); + +/** @internal */ +export type AssistantMessageContent$Outbound = + | string + | Array; + +/** @internal */ +export const AssistantMessageContent$outboundSchema: z.ZodType< + AssistantMessageContent$Outbound, + z.ZodTypeDef, + AssistantMessageContent +> = z.union([z.string(), z.array(ContentChunk$outboundSchema)]); + +/** + * @internal + * @deprecated This namespace will be removed in future versions. Use schemas and types that are exported directly from this module. + */ +export namespace AssistantMessageContent$ { + /** @deprecated use `AssistantMessageContent$inboundSchema` instead. */ + export const inboundSchema = AssistantMessageContent$inboundSchema; + /** @deprecated use `AssistantMessageContent$outboundSchema` instead. */ + export const outboundSchema = AssistantMessageContent$outboundSchema; + /** @deprecated use `AssistantMessageContent$Outbound` instead. */ + export type Outbound = AssistantMessageContent$Outbound; +} + /** @internal */ export const AssistantMessageRole$inboundSchema: z.ZodNativeEnum< typeof AssistantMessageRole @@ -54,7 +91,9 @@ export const AssistantMessage$inboundSchema: z.ZodType< z.ZodTypeDef, unknown > = z.object({ - content: z.nullable(z.string()).optional(), + content: z.nullable( + z.union([z.string(), z.array(ContentChunk$inboundSchema)]), + ).optional(), tool_calls: z.nullable(z.array(ToolCall$inboundSchema)).optional(), prefix: z.boolean().default(false), role: AssistantMessageRole$inboundSchema.default("assistant"), @@ -66,7 +105,7 @@ export const AssistantMessage$inboundSchema: z.ZodType< /** @internal */ export type AssistantMessage$Outbound = { - content?: string | null | undefined; + content?: string | Array | null | undefined; tool_calls?: Array | null | undefined; prefix: boolean; role: string; @@ -78,7 +117,9 @@ export const AssistantMessage$outboundSchema: z.ZodType< z.ZodTypeDef, AssistantMessage > = z.object({ - content: z.nullable(z.string()).optional(), + content: z.nullable( + z.union([z.string(), z.array(ContentChunk$outboundSchema)]), + ).optional(), toolCalls: z.nullable(z.array(ToolCall$outboundSchema)).optional(), prefix: z.boolean().default(false), role: AssistantMessageRole$outboundSchema.default("assistant"), diff --git a/packages/mistralai-gcp/src/models/components/chatcompletionchoice.ts b/packages/mistralai-gcp/src/models/components/chatcompletionchoice.ts index 915e8fb..074f8e1 100644 --- a/packages/mistralai-gcp/src/models/components/chatcompletionchoice.ts +++ b/packages/mistralai-gcp/src/models/components/chatcompletionchoice.ts @@ -4,7 +4,11 @@ import * as z from "zod"; import { remap as remap$ } from "../../lib/primitives.js"; -import { ClosedEnum } from "../../types/enums.js"; +import { + catchUnrecognizedEnum, + OpenEnum, + Unrecognized, +} from "../../types/enums.js"; import { AssistantMessage, AssistantMessage$inboundSchema, @@ -19,7 +23,7 @@ export const ChatCompletionChoiceFinishReason = { Error: "error", ToolCalls: "tool_calls", } as const; -export type ChatCompletionChoiceFinishReason = ClosedEnum< +export type ChatCompletionChoiceFinishReason = OpenEnum< typeof ChatCompletionChoiceFinishReason >; @@ -30,14 +34,25 @@ 
export type ChatCompletionChoice = { }; /** @internal */ -export const ChatCompletionChoiceFinishReason$inboundSchema: z.ZodNativeEnum< - typeof ChatCompletionChoiceFinishReason -> = z.nativeEnum(ChatCompletionChoiceFinishReason); +export const ChatCompletionChoiceFinishReason$inboundSchema: z.ZodType< + ChatCompletionChoiceFinishReason, + z.ZodTypeDef, + unknown +> = z + .union([ + z.nativeEnum(ChatCompletionChoiceFinishReason), + z.string().transform(catchUnrecognizedEnum), + ]); /** @internal */ -export const ChatCompletionChoiceFinishReason$outboundSchema: z.ZodNativeEnum< - typeof ChatCompletionChoiceFinishReason -> = ChatCompletionChoiceFinishReason$inboundSchema; +export const ChatCompletionChoiceFinishReason$outboundSchema: z.ZodType< + ChatCompletionChoiceFinishReason, + z.ZodTypeDef, + ChatCompletionChoiceFinishReason +> = z.union([ + z.nativeEnum(ChatCompletionChoiceFinishReason), + z.string().and(z.custom>()), +]); /** * @internal diff --git a/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts b/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts index f808058..4c2ba99 100644 --- a/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts +++ b/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts @@ -71,9 +71,9 @@ export type ChatCompletionRequest = { */ model: string | null; /** - * What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + * What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. */ - temperature?: number | undefined; + temperature?: number | null | undefined; /** * Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */ @@ -82,10 +82,6 @@ export type ChatCompletionRequest = { * The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. */ maxTokens?: number | null | undefined; - /** - * The minimum number of tokens to generate in the completion. - */ - minTokens?: number | null | undefined; /** * Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. */ @@ -110,6 +106,18 @@ export type ChatCompletionRequest = { responseFormat?: ResponseFormat | undefined; tools?: Array | null | undefined; toolChoice?: ToolChoice | ToolChoiceEnum | undefined; + /** + * presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
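Alongside the relaxed `temperature`, the request type gains three optional sampling knobs in this revision. A small, illustrative sketch of the new camelCase fields follows; the values are placeholders, not recommendations.

```typescript
// Sketch of the sampling controls added to ChatCompletionRequest in this revision.
const samplingOptions = {
  temperature: 0.3, // now optional/nullable; the default depends on the model
  presencePenalty: 0.2, // > 0 nudges the model away from reusing words or phrases
  frequencyPenalty: 0.2, // > 0 penalizes tokens in proportion to how often they appeared
  n: 1, // completions returned per request; input tokens billed once
};
```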
+ */ + presencePenalty?: number | undefined; + /** + * frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + */ + frequencyPenalty?: number | undefined; + /** + * Number of completions to return for each request, input tokens are only billed once. + */ + n?: number | null | undefined; }; /** @internal */ @@ -249,10 +257,9 @@ export const ChatCompletionRequest$inboundSchema: z.ZodType< unknown > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), top_p: z.number().default(1), max_tokens: z.nullable(z.number().int()).optional(), - min_tokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(false), stop: z.union([z.string(), z.array(z.string())]).optional(), random_seed: z.nullable(z.number().int()).optional(), @@ -284,24 +291,27 @@ export const ChatCompletionRequest$inboundSchema: z.ZodType< tools: z.nullable(z.array(Tool$inboundSchema)).optional(), tool_choice: z.union([ToolChoice$inboundSchema, ToolChoiceEnum$inboundSchema]) .optional(), + presence_penalty: z.number().default(0), + frequency_penalty: z.number().default(0), + n: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { "top_p": "topP", "max_tokens": "maxTokens", - "min_tokens": "minTokens", "random_seed": "randomSeed", "response_format": "responseFormat", "tool_choice": "toolChoice", + "presence_penalty": "presencePenalty", + "frequency_penalty": "frequencyPenalty", }); }); /** @internal */ export type ChatCompletionRequest$Outbound = { model: string | null; - temperature: number; + temperature?: number | null | undefined; top_p: number; max_tokens?: number | null | undefined; - min_tokens?: number | null | undefined; stream: boolean; stop?: string | Array | undefined; random_seed?: number | null | undefined; @@ -314,6 +324,9 @@ export type ChatCompletionRequest$Outbound = { response_format?: ResponseFormat$Outbound | undefined; tools?: Array | null | undefined; tool_choice?: ToolChoice$Outbound | string | undefined; + presence_penalty: number; + frequency_penalty: number; + n?: number | null | undefined; }; /** @internal */ @@ -323,10 +336,9 @@ export const ChatCompletionRequest$outboundSchema: z.ZodType< ChatCompletionRequest > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), topP: z.number().default(1), maxTokens: z.nullable(z.number().int()).optional(), - minTokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(false), stop: z.union([z.string(), z.array(z.string())]).optional(), randomSeed: z.nullable(z.number().int()).optional(), @@ -360,14 +372,18 @@ export const ChatCompletionRequest$outboundSchema: z.ZodType< ToolChoice$outboundSchema, ToolChoiceEnum$outboundSchema, ]).optional(), + presencePenalty: z.number().default(0), + frequencyPenalty: z.number().default(0), + n: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { topP: "top_p", maxTokens: "max_tokens", - minTokens: "min_tokens", randomSeed: "random_seed", responseFormat: "response_format", toolChoice: "tool_choice", + presencePenalty: "presence_penalty", + frequencyPenalty: "frequency_penalty", }); }); diff --git 
a/packages/mistralai-gcp/src/models/components/chatcompletionstreamrequest.ts b/packages/mistralai-gcp/src/models/components/chatcompletionstreamrequest.ts index 4917568..dbba7fa 100644 --- a/packages/mistralai-gcp/src/models/components/chatcompletionstreamrequest.ts +++ b/packages/mistralai-gcp/src/models/components/chatcompletionstreamrequest.ts @@ -71,9 +71,9 @@ export type ChatCompletionStreamRequest = { */ model: string | null; /** - * What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + * What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. */ - temperature?: number | undefined; + temperature?: number | null | undefined; /** * Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */ @@ -82,10 +82,6 @@ export type ChatCompletionStreamRequest = { * The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. */ maxTokens?: number | null | undefined; - /** - * The minimum number of tokens to generate in the completion. - */ - minTokens?: number | null | undefined; stream?: boolean | undefined; /** * Stop generation if this token is detected. Or if one of these tokens is detected when providing an array @@ -107,6 +103,18 @@ export type ChatCompletionStreamRequest = { responseFormat?: ResponseFormat | undefined; tools?: Array | null | undefined; toolChoice?: ToolChoice | ToolChoiceEnum | undefined; + /** + * presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + */ + presencePenalty?: number | undefined; + /** + * frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + */ + frequencyPenalty?: number | undefined; + /** + * Number of completions to return for each request, input tokens are only billed once. 
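The same fields are remapped between the SDK's camelCase surface and the snake_case names sent on the wire. Purely for orientation, here is a small sketch of that correspondence for the fields touched in this revision; it mirrors the `remap$` transforms in the schemas and adds nothing new.

```typescript
// camelCase (SDK surface) -> snake_case (JSON payload), per the remap$ transforms.
const wireFieldMap: Record<string, string> = {
  topP: "top_p",
  maxTokens: "max_tokens",
  randomSeed: "random_seed",
  responseFormat: "response_format",
  toolChoice: "tool_choice",
  presencePenalty: "presence_penalty",
  frequencyPenalty: "frequency_penalty",
  // `n`, `model`, `stream`, and `stop` pass through unchanged.
};
```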
+ */ + n?: number | null | undefined; }; /** @internal */ @@ -242,10 +250,9 @@ export const ChatCompletionStreamRequest$inboundSchema: z.ZodType< unknown > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), top_p: z.number().default(1), max_tokens: z.nullable(z.number().int()).optional(), - min_tokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(true), stop: z.union([z.string(), z.array(z.string())]).optional(), random_seed: z.nullable(z.number().int()).optional(), @@ -277,24 +284,27 @@ export const ChatCompletionStreamRequest$inboundSchema: z.ZodType< tools: z.nullable(z.array(Tool$inboundSchema)).optional(), tool_choice: z.union([ToolChoice$inboundSchema, ToolChoiceEnum$inboundSchema]) .optional(), + presence_penalty: z.number().default(0), + frequency_penalty: z.number().default(0), + n: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { "top_p": "topP", "max_tokens": "maxTokens", - "min_tokens": "minTokens", "random_seed": "randomSeed", "response_format": "responseFormat", "tool_choice": "toolChoice", + "presence_penalty": "presencePenalty", + "frequency_penalty": "frequencyPenalty", }); }); /** @internal */ export type ChatCompletionStreamRequest$Outbound = { model: string | null; - temperature: number; + temperature?: number | null | undefined; top_p: number; max_tokens?: number | null | undefined; - min_tokens?: number | null | undefined; stream: boolean; stop?: string | Array | undefined; random_seed?: number | null | undefined; @@ -307,6 +317,9 @@ export type ChatCompletionStreamRequest$Outbound = { response_format?: ResponseFormat$Outbound | undefined; tools?: Array | null | undefined; tool_choice?: ToolChoice$Outbound | string | undefined; + presence_penalty: number; + frequency_penalty: number; + n?: number | null | undefined; }; /** @internal */ @@ -316,10 +329,9 @@ export const ChatCompletionStreamRequest$outboundSchema: z.ZodType< ChatCompletionStreamRequest > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), topP: z.number().default(1), maxTokens: z.nullable(z.number().int()).optional(), - minTokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(true), stop: z.union([z.string(), z.array(z.string())]).optional(), randomSeed: z.nullable(z.number().int()).optional(), @@ -353,14 +365,18 @@ export const ChatCompletionStreamRequest$outboundSchema: z.ZodType< ToolChoice$outboundSchema, ToolChoiceEnum$outboundSchema, ]).optional(), + presencePenalty: z.number().default(0), + frequencyPenalty: z.number().default(0), + n: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { topP: "top_p", maxTokens: "max_tokens", - minTokens: "min_tokens", randomSeed: "random_seed", responseFormat: "response_format", toolChoice: "tool_choice", + presencePenalty: "presence_penalty", + frequencyPenalty: "frequency_penalty", }); }); diff --git a/packages/mistralai-gcp/src/models/components/completionresponsestreamchoice.ts b/packages/mistralai-gcp/src/models/components/completionresponsestreamchoice.ts index f63f369..ac61d7b 100644 --- a/packages/mistralai-gcp/src/models/components/completionresponsestreamchoice.ts +++ b/packages/mistralai-gcp/src/models/components/completionresponsestreamchoice.ts @@ -4,7 +4,11 @@ import * as z from "zod"; import { remap as remap$ } from "../../lib/primitives.js"; -import { ClosedEnum } from 
"../../types/enums.js"; +import { + catchUnrecognizedEnum, + OpenEnum, + Unrecognized, +} from "../../types/enums.js"; import { DeltaMessage, DeltaMessage$inboundSchema, @@ -18,7 +22,7 @@ export const FinishReason = { Error: "error", ToolCalls: "tool_calls", } as const; -export type FinishReason = ClosedEnum; +export type FinishReason = OpenEnum; export type CompletionResponseStreamChoice = { index: number; @@ -27,12 +31,25 @@ export type CompletionResponseStreamChoice = { }; /** @internal */ -export const FinishReason$inboundSchema: z.ZodNativeEnum = - z.nativeEnum(FinishReason); +export const FinishReason$inboundSchema: z.ZodType< + FinishReason, + z.ZodTypeDef, + unknown +> = z + .union([ + z.nativeEnum(FinishReason), + z.string().transform(catchUnrecognizedEnum), + ]); /** @internal */ -export const FinishReason$outboundSchema: z.ZodNativeEnum = - FinishReason$inboundSchema; +export const FinishReason$outboundSchema: z.ZodType< + FinishReason, + z.ZodTypeDef, + FinishReason +> = z.union([ + z.nativeEnum(FinishReason), + z.string().and(z.custom>()), +]); /** * @internal diff --git a/packages/mistralai-gcp/src/models/components/deltamessage.ts b/packages/mistralai-gcp/src/models/components/deltamessage.ts index 16e4d5f..e9b3c0f 100644 --- a/packages/mistralai-gcp/src/models/components/deltamessage.ts +++ b/packages/mistralai-gcp/src/models/components/deltamessage.ts @@ -4,6 +4,12 @@ import * as z from "zod"; import { remap as remap$ } from "../../lib/primitives.js"; +import { + ContentChunk, + ContentChunk$inboundSchema, + ContentChunk$Outbound, + ContentChunk$outboundSchema, +} from "./contentchunk.js"; import { ToolCall, ToolCall$inboundSchema, @@ -11,20 +17,51 @@ import { ToolCall$outboundSchema, } from "./toolcall.js"; +export type Content = string | Array; + export type DeltaMessage = { - role?: string | undefined; - content?: string | null | undefined; + role?: string | null | undefined; + content?: string | Array | null | undefined; toolCalls?: Array | null | undefined; }; +/** @internal */ +export const Content$inboundSchema: z.ZodType = + z.union([z.string(), z.array(ContentChunk$inboundSchema)]); + +/** @internal */ +export type Content$Outbound = string | Array; + +/** @internal */ +export const Content$outboundSchema: z.ZodType< + Content$Outbound, + z.ZodTypeDef, + Content +> = z.union([z.string(), z.array(ContentChunk$outboundSchema)]); + +/** + * @internal + * @deprecated This namespace will be removed in future versions. Use schemas and types that are exported directly from this module. + */ +export namespace Content$ { + /** @deprecated use `Content$inboundSchema` instead. */ + export const inboundSchema = Content$inboundSchema; + /** @deprecated use `Content$outboundSchema` instead. */ + export const outboundSchema = Content$outboundSchema; + /** @deprecated use `Content$Outbound` instead. 
*/ + export type Outbound = Content$Outbound; +} + /** @internal */ export const DeltaMessage$inboundSchema: z.ZodType< DeltaMessage, z.ZodTypeDef, unknown > = z.object({ - role: z.string().optional(), - content: z.nullable(z.string()).optional(), + role: z.nullable(z.string()).optional(), + content: z.nullable( + z.union([z.string(), z.array(ContentChunk$inboundSchema)]), + ).optional(), tool_calls: z.nullable(z.array(ToolCall$inboundSchema)).optional(), }).transform((v) => { return remap$(v, { @@ -34,8 +71,8 @@ export const DeltaMessage$inboundSchema: z.ZodType< /** @internal */ export type DeltaMessage$Outbound = { - role?: string | undefined; - content?: string | null | undefined; + role?: string | null | undefined; + content?: string | Array | null | undefined; tool_calls?: Array | null | undefined; }; @@ -45,8 +82,10 @@ export const DeltaMessage$outboundSchema: z.ZodType< z.ZodTypeDef, DeltaMessage > = z.object({ - role: z.string().optional(), - content: z.nullable(z.string()).optional(), + role: z.nullable(z.string()).optional(), + content: z.nullable( + z.union([z.string(), z.array(ContentChunk$outboundSchema)]), + ).optional(), toolCalls: z.nullable(z.array(ToolCall$outboundSchema)).optional(), }).transform((v) => { return remap$(v, { diff --git a/packages/mistralai-gcp/src/models/components/fimcompletionrequest.ts b/packages/mistralai-gcp/src/models/components/fimcompletionrequest.ts index f586912..565f6ac 100644 --- a/packages/mistralai-gcp/src/models/components/fimcompletionrequest.ts +++ b/packages/mistralai-gcp/src/models/components/fimcompletionrequest.ts @@ -20,9 +20,9 @@ export type FIMCompletionRequest = { */ model: string | null; /** - * What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + * What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. */ - temperature?: number | undefined; + temperature?: number | null | undefined; /** * Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */ @@ -31,10 +31,6 @@ export type FIMCompletionRequest = { * The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. */ maxTokens?: number | null | undefined; - /** - * The minimum number of tokens to generate in the completion. - */ - minTokens?: number | null | undefined; /** * Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. */ @@ -55,6 +51,10 @@ export type FIMCompletionRequest = { * Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. */ suffix?: string | null | undefined; + /** + * The minimum number of tokens to generate in the completion. + */ + minTokens?: number | null | undefined; }; /** @internal */ @@ -94,36 +94,36 @@ export const FIMCompletionRequest$inboundSchema: z.ZodType< unknown > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), top_p: z.number().default(1), max_tokens: z.nullable(z.number().int()).optional(), - min_tokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(false), stop: z.union([z.string(), z.array(z.string())]).optional(), random_seed: z.nullable(z.number().int()).optional(), prompt: z.string(), suffix: z.nullable(z.string()).optional(), + min_tokens: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { "top_p": "topP", "max_tokens": "maxTokens", - "min_tokens": "minTokens", "random_seed": "randomSeed", + "min_tokens": "minTokens", }); }); /** @internal */ export type FIMCompletionRequest$Outbound = { model: string | null; - temperature: number; + temperature?: number | null | undefined; top_p: number; max_tokens?: number | null | undefined; - min_tokens?: number | null | undefined; stream: boolean; stop?: string | Array | undefined; random_seed?: number | null | undefined; prompt: string; suffix?: string | null | undefined; + min_tokens?: number | null | undefined; }; /** @internal */ @@ -133,21 +133,21 @@ export const FIMCompletionRequest$outboundSchema: z.ZodType< FIMCompletionRequest > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), topP: z.number().default(1), maxTokens: z.nullable(z.number().int()).optional(), - minTokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(false), stop: z.union([z.string(), z.array(z.string())]).optional(), randomSeed: z.nullable(z.number().int()).optional(), prompt: z.string(), suffix: z.nullable(z.string()).optional(), + minTokens: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { topP: "top_p", maxTokens: "max_tokens", - minTokens: "min_tokens", randomSeed: "random_seed", + minTokens: "min_tokens", }); }); diff --git a/packages/mistralai-gcp/src/models/components/fimcompletionstreamrequest.ts b/packages/mistralai-gcp/src/models/components/fimcompletionstreamrequest.ts index a8c23ee..d2195db 100644 --- a/packages/mistralai-gcp/src/models/components/fimcompletionstreamrequest.ts +++ b/packages/mistralai-gcp/src/models/components/fimcompletionstreamrequest.ts @@ -20,9 +20,9 @@ export type FIMCompletionStreamRequest = { */ model: string | null; /** - * What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + * What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
*/ - temperature?: number | undefined; + temperature?: number | null | undefined; /** * Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */ @@ -31,10 +31,6 @@ export type FIMCompletionStreamRequest = { * The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. */ maxTokens?: number | null | undefined; - /** - * The minimum number of tokens to generate in the completion. - */ - minTokens?: number | null | undefined; stream?: boolean | undefined; /** * Stop generation if this token is detected. Or if one of these tokens is detected when providing an array @@ -52,6 +48,10 @@ export type FIMCompletionStreamRequest = { * Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. */ suffix?: string | null | undefined; + /** + * The minimum number of tokens to generate in the completion. + */ + minTokens?: number | null | undefined; }; /** @internal */ @@ -91,36 +91,36 @@ export const FIMCompletionStreamRequest$inboundSchema: z.ZodType< unknown > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), top_p: z.number().default(1), max_tokens: z.nullable(z.number().int()).optional(), - min_tokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(true), stop: z.union([z.string(), z.array(z.string())]).optional(), random_seed: z.nullable(z.number().int()).optional(), prompt: z.string(), suffix: z.nullable(z.string()).optional(), + min_tokens: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { "top_p": "topP", "max_tokens": "maxTokens", - "min_tokens": "minTokens", "random_seed": "randomSeed", + "min_tokens": "minTokens", }); }); /** @internal */ export type FIMCompletionStreamRequest$Outbound = { model: string | null; - temperature: number; + temperature?: number | null | undefined; top_p: number; max_tokens?: number | null | undefined; - min_tokens?: number | null | undefined; stream: boolean; stop?: string | Array | undefined; random_seed?: number | null | undefined; prompt: string; suffix?: string | null | undefined; + min_tokens?: number | null | undefined; }; /** @internal */ @@ -130,21 +130,21 @@ export const FIMCompletionStreamRequest$outboundSchema: z.ZodType< FIMCompletionStreamRequest > = z.object({ model: z.nullable(z.string()), - temperature: z.number().default(0.7), + temperature: z.nullable(z.number()).optional(), topP: z.number().default(1), maxTokens: z.nullable(z.number().int()).optional(), - minTokens: z.nullable(z.number().int()).optional(), stream: z.boolean().default(true), stop: z.union([z.string(), z.array(z.string())]).optional(), randomSeed: z.nullable(z.number().int()).optional(), prompt: z.string(), suffix: z.nullable(z.string()).optional(), + minTokens: z.nullable(z.number().int()).optional(), }).transform((v) => { return remap$(v, { topP: "top_p", maxTokens: "max_tokens", - minTokens: "min_tokens", randomSeed: "random_seed", + minTokens: "min_tokens", }); }); diff --git a/packages/mistralai-gcp/src/models/components/systemmessage.ts 
b/packages/mistralai-gcp/src/models/components/systemmessage.ts index 2ea418f..b9f94b1 100644 --- a/packages/mistralai-gcp/src/models/components/systemmessage.ts +++ b/packages/mistralai-gcp/src/models/components/systemmessage.ts @@ -11,7 +11,7 @@ import { TextChunk$outboundSchema, } from "./textchunk.js"; -export type Content = string | Array; +export type SystemMessageContent = string | Array; export const Role = { System: "system", @@ -24,30 +24,33 @@ export type SystemMessage = { }; /** @internal */ -export const Content$inboundSchema: z.ZodType = - z.union([z.string(), z.array(TextChunk$inboundSchema)]); +export const SystemMessageContent$inboundSchema: z.ZodType< + SystemMessageContent, + z.ZodTypeDef, + unknown +> = z.union([z.string(), z.array(TextChunk$inboundSchema)]); /** @internal */ -export type Content$Outbound = string | Array; +export type SystemMessageContent$Outbound = string | Array; /** @internal */ -export const Content$outboundSchema: z.ZodType< - Content$Outbound, +export const SystemMessageContent$outboundSchema: z.ZodType< + SystemMessageContent$Outbound, z.ZodTypeDef, - Content + SystemMessageContent > = z.union([z.string(), z.array(TextChunk$outboundSchema)]); /** * @internal * @deprecated This namespace will be removed in future versions. Use schemas and types that are exported directly from this module. */ -export namespace Content$ { - /** @deprecated use `Content$inboundSchema` instead. */ - export const inboundSchema = Content$inboundSchema; - /** @deprecated use `Content$outboundSchema` instead. */ - export const outboundSchema = Content$outboundSchema; - /** @deprecated use `Content$Outbound` instead. */ - export type Outbound = Content$Outbound; +export namespace SystemMessageContent$ { + /** @deprecated use `SystemMessageContent$inboundSchema` instead. */ + export const inboundSchema = SystemMessageContent$inboundSchema; + /** @deprecated use `SystemMessageContent$outboundSchema` instead. */ + export const outboundSchema = SystemMessageContent$outboundSchema; + /** @deprecated use `SystemMessageContent$Outbound` instead. 
*/ + export type Outbound = SystemMessageContent$Outbound; } /** @internal */ diff --git a/packages/mistralai-gcp/src/models/components/usermessage.ts b/packages/mistralai-gcp/src/models/components/usermessage.ts index 16c7cfe..6f0a2cc 100644 --- a/packages/mistralai-gcp/src/models/components/usermessage.ts +++ b/packages/mistralai-gcp/src/models/components/usermessage.ts @@ -19,7 +19,7 @@ export const UserMessageRole = { export type UserMessageRole = ClosedEnum; export type UserMessage = { - content: string | Array; + content: string | Array | null; role?: UserMessageRole | undefined; }; @@ -80,13 +80,15 @@ export const UserMessage$inboundSchema: z.ZodType< z.ZodTypeDef, unknown > = z.object({ - content: z.union([z.string(), z.array(ContentChunk$inboundSchema)]), + content: z.nullable( + z.union([z.string(), z.array(ContentChunk$inboundSchema)]), + ), role: UserMessageRole$inboundSchema.default("user"), }); /** @internal */ export type UserMessage$Outbound = { - content: string | Array; + content: string | Array | null; role: string; }; @@ -96,7 +98,9 @@ export const UserMessage$outboundSchema: z.ZodType< z.ZodTypeDef, UserMessage > = z.object({ - content: z.union([z.string(), z.array(ContentChunk$outboundSchema)]), + content: z.nullable( + z.union([z.string(), z.array(ContentChunk$outboundSchema)]), + ), role: UserMessageRole$outboundSchema.default("user"), }); diff --git a/packages/mistralai-gcp/src/models/errors/httpvalidationerror.ts b/packages/mistralai-gcp/src/models/errors/httpvalidationerror.ts index 84cb533..d9a58dc 100644 --- a/packages/mistralai-gcp/src/models/errors/httpvalidationerror.ts +++ b/packages/mistralai-gcp/src/models/errors/httpvalidationerror.ts @@ -5,16 +5,10 @@ import * as z from "zod"; import * as components from "../components/index.js"; -/** - * Validation Error - */ export type HTTPValidationErrorData = { detail?: Array | undefined; }; -/** - * Validation Error - */ export class HTTPValidationError extends Error { detail?: Array | undefined; diff --git a/packages/mistralai-gcp/src/sdk/sdk.ts b/packages/mistralai-gcp/src/sdk/sdk.ts index 541c977..05834f3 100644 --- a/packages/mistralai-gcp/src/sdk/sdk.ts +++ b/packages/mistralai-gcp/src/sdk/sdk.ts @@ -1,9 +1,9 @@ -import {GoogleAuth} from "google-auth-library"; -import {SDKOptions} from "../lib/config.js"; -import {ClientSDK} from "../lib/sdks.js"; -import {Chat} from "./chat"; -import {Fim} from "./fim"; -import {SDKHooks} from "../hooks"; +import { GoogleAuth } from "google-auth-library"; +import { SDKOptions } from "../lib/config.js"; +import { ClientSDK } from "../lib/sdks.js"; +import { Chat } from "./chat"; +import { Fim } from "./fim"; +import { SDKHooks } from "../hooks"; export type GoogleCloudOptions = { /** The region of the Google Cloud AI Platform endpoint */ @@ -16,6 +16,21 @@ export type GoogleCloudOptions = { projectId: string; } +const LEGACY_MODEL_ID_FORMAT: { [key: string]: string } = { + "codestral-2405": "codestral@2405", + "mistral-large-2407": "mistral-large@2407", + "mistral-nemo-2407": "mistral-nemo@2407", +}; + +function getModelInfo(model: string): [string, string] { + let modelId = LEGACY_MODEL_ID_FORMAT[model]; + if (modelId === undefined) { + modelId = model; + } + model = model.split("-").slice(0, -1).join("-"); + return [model, modelId]; +} + export class MistralGoogleCloud extends ClientSDK { constructor(options: SDKOptions & GoogleCloudOptions = {}) { let projectId = options.projectId ?? 
""; @@ -84,19 +99,13 @@ export class MistralGoogleCloud extends ClientSDK { throw new Error("body.model is required and must be a string"); } - const modelParts = body.model.split("-"); - if (modelParts.length < 2) { - throw new Error("model must be in the format 'model-version'"); - } - - const modelVersion = modelParts.pop(); // Get the last element - const model = modelParts.join("-"); // Join the rest back together + const [model, modelId] = getModelInfo(body.model); - if (!model || !modelVersion) { + if (!model || !modelId) { throw new Error("model must be in the format 'model-version'"); } - input.url.pathname = `v1/projects/${projectId}/locations/${options.region}/publishers/mistralai/models/${model}@${modelVersion}:${rawPredictType}`; + input.url.pathname = `v1/projects/${projectId}/locations/${options.region}/publishers/mistralai/models/${modelId}:${rawPredictType}`; body.model = model