diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 144ff73..d435584 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,12 +1,12 @@
 lockVersion: 2.0.0
 id: 81b65c50-f2e3-40a3-bd65-346524007b3d
 management:
-  docChecksum: c3a7b8cc0fd70dd5c1c061ff478e5c35
+  docChecksum: c19f5a86b8045af32a46604ee5478061
   docVersion: 0.0.2
   speakeasyVersion: 1.372.0
   generationVersion: 2.399.0
-  releaseVersion: 1.1.1
-  configChecksum: 3dbc80018ebe82836a9f8c940a17e515
+  releaseVersion: 1.1.2
+  configChecksum: 73da950b6151099c2d5a87da13ba3360
   repoURL: https://github.com/mistralai/client-ts.git
   installationURL: https://github.com/mistralai/client-ts
   published: true
diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml
index f2582dd..4de31ce 100644
--- a/.speakeasy/gen.yaml
+++ b/.speakeasy/gen.yaml
@@ -15,7 +15,7 @@ generation:
   auth:
     oAuth2ClientCredentialsEnabled: true
 typescript:
-  version: 1.1.1
+  version: 1.1.2
   additionalDependencies:
     dependencies: {}
     devDependencies: {}
diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index 11cba22..64aa6bd 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -2,40 +2,40 @@ speakeasyVersion: 1.372.0
 sources:
   mistral-azure-source:
     sourceNamespace: mistral-openapi-azure
-    sourceRevisionDigest: sha256:f5299139946da6306d94aa4d6b07bd3a7daea319643fdbf6f61d3ee4543a92e2
+    sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca
     sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49
     tags:
       - latest
   mistral-google-cloud-source:
     sourceNamespace: mistral-openapi-google-cloud
-    sourceRevisionDigest: sha256:de11048beb076fb5fd0691bc0b7792ae82db07daa88691ab34de4c0f77268d89
-    sourceBlobDigest: sha256:7473992b5ae6634e4974645784e86554e3f2d5adb37c47cf69be8b0241063696
+    sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f
+    sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba
     tags:
       - latest
   mistral-openapi:
     sourceNamespace: mistral-openapi
-    sourceRevisionDigest: sha256:bb3576b41d23f8bada9e8a1a94766fa4c9c5db8232fd82794c34373aa867b1f9
-    sourceBlobDigest: sha256:59cff5081a9e85dc78d4bf658cf15c8ac4cf9fcb2cafc132cb36d724d6c86eb8
+    sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398
+    sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e
     tags:
       - latest
 targets:
   mistralai-azure-sdk:
     source: mistral-azure-source
     sourceNamespace: mistral-openapi-azure
-    sourceRevisionDigest: sha256:f5299139946da6306d94aa4d6b07bd3a7daea319643fdbf6f61d3ee4543a92e2
+    sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca
     sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49
     outLocation: ./packages/mistralai-azure
   mistralai-gcp-sdk:
     source: mistral-google-cloud-source
     sourceNamespace: mistral-openapi-google-cloud
-    sourceRevisionDigest: sha256:de11048beb076fb5fd0691bc0b7792ae82db07daa88691ab34de4c0f77268d89
-    sourceBlobDigest: sha256:7473992b5ae6634e4974645784e86554e3f2d5adb37c47cf69be8b0241063696
+    sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f
+    sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba
     outLocation: ./packages/mistralai-gcp
   mistralai-sdk:
     source: mistral-openapi
     sourceNamespace: mistral-openapi
-    sourceRevisionDigest: sha256:bb3576b41d23f8bada9e8a1a94766fa4c9c5db8232fd82794c34373aa867b1f9
-    sourceBlobDigest: sha256:59cff5081a9e85dc78d4bf658cf15c8ac4cf9fcb2cafc132cb36d724d6c86eb8
+    sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398
+    sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e
     outLocation: /Users/gaspard/public-mistral/client-ts
 workflow:
   workflowVersion: 1.0.0
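The release bump has to land in several manifests at once: package.json, jsr.json, both package-lock.json files, and the generated SDK_METADATA constants. A quick consistency check over the JSON manifests, written here as a hypothetical helper script that is not part of this repo, could look like:

```typescript
// check-versions.ts (hypothetical helper, not part of this repo)
// Confirms that the JSON manifests all declare the same version.
import { readFileSync } from "node:fs";

function versionOf(path: string): string {
  return JSON.parse(readFileSync(path, "utf8")).version;
}

const manifests = ["package.json", "jsr.json", "package-lock.json"];
const versions = manifests.map(versionOf);

if (new Set(versions).size !== 1) {
  throw new Error(`version mismatch: ${versions.join(", ")}`);
}
console.log(`all manifests agree on ${versions[0]}`);
```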
diff --git a/docs/models/components/chatcompletionrequest.md b/docs/models/components/chatcompletionrequest.md
index b246302..b250efc 100644
--- a/docs/models/components/chatcompletionrequest.md
+++ b/docs/models/components/chatcompletionrequest.md
@@ -20,7 +20,7 @@ let value: ChatCompletionRequest = {
 | Field | Type | Required | Description | Example |
 | ----- | ---- | -------- | ----------- | ------- |
-| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
 | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
 | `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
 | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
diff --git a/docs/models/components/chatcompletionstreamrequest.md b/docs/models/components/chatcompletionstreamrequest.md
index dcb49f8..2322da3 100644
--- a/docs/models/components/chatcompletionstreamrequest.md
+++ b/docs/models/components/chatcompletionstreamrequest.md
@@ -20,7 +20,7 @@ let value: ChatCompletionStreamRequest = {
 | Field | Type | Required | Description | Example |
 | ----- | ---- | -------- | ----------- | ------- |
-| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
 | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
 | `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
 | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
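The `model`, `temperature`, `topP`, and `maxTokens` fields in the tables above map one-to-one onto the request object the SDK accepts. A minimal sketch of a non-streaming call, assuming an API key in `MISTRAL_API_KEY` and the `chat.complete` method documented in this repo's README:

```typescript
import { Mistral } from "@mistralai/mistralai";

const client = new Mistral({ apiKey: process.env.MISTRAL_API_KEY });

// Per the field docs above, set either temperature or topP, not both.
const result = await client.chat.complete({
  model: "mistral-small-latest",
  messages: [{ role: "user", content: "Explain nucleus sampling in one sentence." }],
  temperature: 0.2, // lower = more focused and deterministic
  maxTokens: 128,   // prompt tokens + maxTokens must fit the context window
});

console.log(result.choices?.[0]?.message?.content);
```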
diff --git a/jsr.json b/jsr.json
index fc9e4f6..b4df6c2 100644
--- a/jsr.json
+++ b/jsr.json
@@ -2,7 +2,7 @@ {
   "name": "@mistralai/mistralai",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "exports": {
     ".": "./src/index.ts",
     "./models/errors": "./src/models/errors/index.ts",
diff --git a/package-lock.json b/package-lock.json
index 7455799..53deb10 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@mistralai/mistralai",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@mistralai/mistralai",
-      "version": "1.1.1",
+      "version": "1.1.2",
       "devDependencies": {
         "@typescript-eslint/eslint-plugin": "^7.7.1",
         "@typescript-eslint/parser": "^7.7.1",
diff --git a/package.json b/package.json
index a63c35b..44778c5 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@mistralai/mistralai",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "author": "Speakeasy",
   "main": "./index.js",
   "sideEffects": false,
diff --git a/packages/mistralai-gcp/.speakeasy/gen.lock b/packages/mistralai-gcp/.speakeasy/gen.lock
index 6d6893b..c9dc697 100644
--- a/packages/mistralai-gcp/.speakeasy/gen.lock
+++ b/packages/mistralai-gcp/.speakeasy/gen.lock
@@ -1,12 +1,12 @@
 lockVersion: 2.0.0
 id: c6044247-eaf9-46da-b078-0e1334e93be2
 management:
-  docChecksum: 92df54490ab91f38782a1495d03835bf
+  docChecksum: 4cc6e7c5c5ba15491872c600d4a247ef
   docVersion: 0.0.2
   speakeasyVersion: 1.372.0
   generationVersion: 2.399.0
-  releaseVersion: 1.1.1
-  configChecksum: 50c316525d725616442c63213468ca2c
+  releaseVersion: 1.1.2
+  configChecksum: aee01235f4e46aef1a055407ead7ae83
   repoURL: https://github.com/mistralai/client-ts.git
   repoSubDirectory: packages/mistralai-gcp
   installationURL: https://gitpkg.now.sh/mistralai/client-ts/packages/mistralai-gcp
diff --git a/packages/mistralai-gcp/.speakeasy/gen.yaml b/packages/mistralai-gcp/.speakeasy/gen.yaml
index b8e19b0..5059078 100644
--- a/packages/mistralai-gcp/.speakeasy/gen.yaml
+++ b/packages/mistralai-gcp/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
   auth:
     oAuth2ClientCredentialsEnabled: true
 typescript:
-  version: 1.1.1
+  version: 1.1.2
   additionalDependencies:
     dependencies:
       google-auth-library: ^9.11.0
diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md b/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md
index 2539206..92bf889 100644
--- a/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md
+++ b/packages/mistralai-gcp/docs/models/components/chatcompletionrequest.md
@@ -20,7 +20,7 @@ let value: ChatCompletionRequest = {
 | Field | Type | Required | Description | Example |
 | ----- | ---- | -------- | ----------- | ------- |
-| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
 | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
 | `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
 | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
diff --git a/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md b/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md
index 52341b8..4357cbd 100644
--- a/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md
+++ b/packages/mistralai-gcp/docs/models/components/chatcompletionstreamrequest.md
@@ -20,7 +20,7 @@ let value: ChatCompletionStreamRequest = {
 | Field | Type | Required | Description | Example |
 | ----- | ---- | -------- | ----------- | ------- |
-| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
 | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
 | `topP` | *number* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
 | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
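`ChatCompletionStreamRequest` carries the same fields; only the call site changes. A sketch of streaming through the GCP package, with the `MistralGoogleCloud` client and its `region`/`projectId` options taken from that package's README, and the project ID a placeholder:

```typescript
import { MistralGoogleCloud } from "@mistralai/mistralai-gcp";

// Auth is resolved by google-auth-library's application-default
// credentials; region and projectId below are placeholders.
const client = new MistralGoogleCloud({
  region: "europe-west4",
  projectId: "your-gcp-project",
});

const stream = await client.chat.stream({
  model: "mistral-small-latest",
  messages: [{ role: "user", content: "Write a haiku about version bumps." }],
  topP: 0.9, // the alternative to temperature; avoid setting both
});

// Each event wraps a completion chunk; print text deltas as they arrive.
for await (const event of stream) {
  const delta = event.data.choices?.[0]?.delta?.content;
  if (typeof delta === "string") process.stdout.write(delta);
}
```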
diff --git a/packages/mistralai-gcp/jsr.json b/packages/mistralai-gcp/jsr.json
index 0ca3e3b..bbb4130 100644
--- a/packages/mistralai-gcp/jsr.json
+++ b/packages/mistralai-gcp/jsr.json
@@ -2,7 +2,7 @@ {
   "name": "@mistralai/mistralai-gcp",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "exports": {
     ".": "./src/index.ts",
     "./models/errors": "./src/models/errors/index.ts",
diff --git a/packages/mistralai-gcp/package-lock.json b/packages/mistralai-gcp/package-lock.json
index 4b8efaa..b492dab 100644
--- a/packages/mistralai-gcp/package-lock.json
+++ b/packages/mistralai-gcp/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@mistralai/mistralai-gcp",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@mistralai/mistralai-gcp",
-      "version": "1.1.1",
+      "version": "1.1.2",
       "dependencies": {
         "google-auth-library": "^9.11.0"
       },
diff --git a/packages/mistralai-gcp/package.json b/packages/mistralai-gcp/package.json
index 295db87..ac08081 100644
--- a/packages/mistralai-gcp/package.json
+++ b/packages/mistralai-gcp/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@mistralai/mistralai-gcp",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "author": "Speakeasy",
   "main": "./index.js",
   "sideEffects": false,
diff --git a/packages/mistralai-gcp/src/lib/config.ts b/packages/mistralai-gcp/src/lib/config.ts
index caef65f..201ff01 100644
--- a/packages/mistralai-gcp/src/lib/config.ts
+++ b/packages/mistralai-gcp/src/lib/config.ts
@@ -55,7 +55,7 @@ export function serverURLFromOptions(options: SDKOptions): URL | null {
 export const SDK_METADATA = {
   language: "typescript",
   openapiDocVersion: "0.0.2",
-  sdkVersion: "1.1.1",
+  sdkVersion: "1.1.2",
   genVersion: "2.399.0",
-  userAgent: "speakeasy-sdk/typescript 1.1.1 2.399.0 0.0.2 @mistralai/mistralai-gcp",
+  userAgent: "speakeasy-sdk/typescript 1.1.2 2.399.0 0.0.2 @mistralai/mistralai-gcp",
 } as const;
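The regenerated `userAgent` is just the surrounding `SDK_METADATA` fields joined in a fixed order, which is why every version bump rewrites two lines of this file. A sketch that rebuilds the string from its parts (the join order is inferred from the values visible in this diff, not from generator source):

```typescript
// Rebuild the user-agent string from its component fields, following the
// "speakeasy-sdk/<language> <sdkVersion> <genVersion> <docVersion> <package>"
// pattern visible in SDK_METADATA above.
const meta = {
  language: "typescript",
  sdkVersion: "1.1.2",
  genVersion: "2.399.0",
  openapiDocVersion: "0.0.2",
  packageName: "@mistralai/mistralai-gcp",
} as const;

const userAgent =
  `speakeasy-sdk/${meta.language} ${meta.sdkVersion} ` +
  `${meta.genVersion} ${meta.openapiDocVersion} ${meta.packageName}`;

console.log(userAgent === "speakeasy-sdk/typescript 1.1.2 2.399.0 0.0.2 @mistralai/mistralai-gcp"); // true
```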
diff --git a/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts b/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts
index 1ac8ab2..8fdfea1 100644
--- a/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts
+++ b/packages/mistralai-gcp/src/models/components/chatcompletionrequest.ts
@@ -57,7 +57,7 @@ export type ChatCompletionRequestToolChoice = ClosedEnum;
 
 export type ChatCompletionRequest = {
   /**
-   * ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+   * ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
    */
   model: string | null;
   /**
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 992282d..bf1d61b 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -55,7 +55,7 @@ export function serverURLFromOptions(options: SDKOptions): URL | null {
 export const SDK_METADATA = {
   language: "typescript",
   openapiDocVersion: "0.0.2",
-  sdkVersion: "1.1.1",
+  sdkVersion: "1.1.2",
   genVersion: "2.399.0",
-  userAgent: "speakeasy-sdk/typescript 1.1.1 2.399.0 0.0.2 @mistralai/mistralai",
+  userAgent: "speakeasy-sdk/typescript 1.1.2 2.399.0 0.0.2 @mistralai/mistralai",
 } as const;
diff --git a/src/models/components/chatcompletionrequest.ts b/src/models/components/chatcompletionrequest.ts
index ec09ea0..3132d71 100644
--- a/src/models/components/chatcompletionrequest.ts
+++ b/src/models/components/chatcompletionrequest.ts
@@ -57,7 +57,7 @@ export type ToolChoice = ClosedEnum;
 
 export type ChatCompletionRequest = {
   /**
-   * ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+   * ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
    */
   model: string | null;
   /**
diff --git a/src/models/components/chatcompletionstreamrequest.ts b/src/models/components/chatcompletionstreamrequest.ts
index 1f63a69..b38fd96 100644
--- a/src/models/components/chatcompletionstreamrequest.ts
+++ b/src/models/components/chatcompletionstreamrequest.ts
@@ -59,7 +59,7 @@ export type ChatCompletionStreamRequestToolChoice = ClosedEnum<
 
 export type ChatCompletionStreamRequest = {
   /**
-   * ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+   * ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
    */
   model: string | null;
   /**
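Every docstring change in this release is the same fix: the List Available Models link now points at the regenerated API reference anchor. For completeness, a sketch of calling that endpoint through this SDK, assuming the `models.list` method this repo documents and an API key in `MISTRAL_API_KEY`:

```typescript
import { Mistral } from "@mistralai/mistralai";

const client = new Mistral({ apiKey: process.env.MISTRAL_API_KEY });

// The endpoint behind the updated
// /api/#tag/models/operation/list_models_v1_models_get link.
const models = await client.models.list();
for (const model of models.data ?? []) {
  console.log(model.id);
}
```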