From ab4b9db53d6406eafd5cad47b32dd9667cd74c22 Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 01:27:47 -0400 Subject: [PATCH 1/7] api renames --- pyproject.toml | 2 +- substrate/GEN_VERSION | 2 +- substrate/__init__.py | 22 +-- substrate/_version.py | 2 +- substrate/core/models.py | 155 +++++++++++----- substrate/future_dataclass_models.py | 206 ++++++++++++++------- substrate/nodes.py | 257 +++++++++++++-------------- substrate/typeddict_models.py | 153 +++++++++++----- 8 files changed, 487 insertions(+), 312 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b330006..df0863f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "substrate" -version = "220240604.0.0" +version = "220240612.0.0" description = "Substrate Python SDK" readme = "README.md" authors = [ "vprtwn ", "liamgriffiths ",] diff --git a/substrate/GEN_VERSION b/substrate/GEN_VERSION index fb0a55b..3caa8ba 100644 --- a/substrate/GEN_VERSION +++ b/substrate/GEN_VERSION @@ -1 +1 @@ -20240604.20240611 \ No newline at end of file +20240612.20240612 \ No newline at end of file diff --git a/substrate/__init__.py b/substrate/__init__.py index aab11c5..bc3fe59 100644 --- a/substrate/__init__.py +++ b/substrate/__init__.py @@ -1,21 +1,21 @@ """ 𐃏 Substrate Python SDK -20240604.20240611 +20240612.20240612 """ from .nodes import ( CLIP, - XTTSV2, JinaV2, - FillMask, EmbedText, EmbedImage, + EraseImage, Experimental, FetchVectors, Firellava13B, GenerateJSON, GenerateText, + InpaintImage, UpscaleImage, DeleteVectors, GenerateImage, @@ -24,11 +24,11 @@ MultiEmbedText, MultiEmbedImage, SegmentAnything, - TranscribeMedia, ListVectorStores, Llama3Instruct8B, QueryVectorStore, RemoveBackground, + TranscribeSpeech, BatchGenerateJSON, BatchGenerateText, DeleteVectorStore, @@ -36,13 +36,11 @@ Mistral7BInstruct, MultiGenerateJSON, MultiGenerateText, + MultiInpaintImage, SegmentUnderPoint, - GenerateTextVision, MultiGenerateImage, - GenerativeEditImage, Mixtral8x7BInstruct, FindOrCreateVectorStore, - MultiGenerativeEditImage, StableDiffusionXLInpaint, StableDiffusionXLLightning, StableDiffusionXLControlNet, @@ -65,7 +63,6 @@ "BatchGenerateJSON", "GenerateJSON", "MultiGenerateJSON", - "GenerateTextVision", "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", @@ -73,16 +70,15 @@ "Firellava13B", "GenerateImage", "MultiGenerateImage", - "GenerativeEditImage", - "MultiGenerativeEditImage", + "InpaintImage", + "MultiInpaintImage", "StableDiffusionXLLightning", "StableDiffusionXLInpaint", "StableDiffusionXLControlNet", - "TranscribeMedia", + "TranscribeSpeech", "GenerateSpeech", - "XTTSV2", "RemoveBackground", - "FillMask", + "EraseImage", "UpscaleImage", "SegmentUnderPoint", "SegmentAnything", diff --git a/substrate/_version.py b/substrate/_version.py index d6c93dd..f4680f1 100644 --- a/substrate/_version.py +++ b/substrate/_version.py @@ -1 +1 @@ -__version__ = "220240604.0.0" +__version__ = "220240612.0.0" diff --git a/substrate/core/models.py b/substrate/core/models.py index 73c2534..83afea1 100644 --- a/substrate/core/models.py +++ b/substrate/core/models.py @@ -89,6 +89,10 @@ class GenerateTextIn(BaseModel): """ Input prompt. """ + image_uris: Optional[List[str]] = None + """ + Image prompts. + """ temperature: Annotated[float, Field(ge=0.0, le=1.0)] = 0.4 """ Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. 
@@ -97,14 +101,15 @@ class GenerateTextIn(BaseModel): """ Maximum number of tokens to generate. """ - node: Literal[ + model: Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", "Llama3Instruct70B", + "Firellava13B", ] = "Llama3Instruct8B" """ - Selected node. + Selected model. `Firellava13B` is automatically selected when `image_uris` is provided. """ @@ -132,9 +137,9 @@ class GenerateJSONIn(BaseModel): """ Maximum number of tokens to generate. """ - node: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ - Selected node. + Selected model. """ @@ -166,14 +171,14 @@ class MultiGenerateTextIn(BaseModel): """ Maximum number of tokens to generate. """ - node: Literal[ + model: Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", "Llama3Instruct70B", ] = "Llama3Instruct8B" """ - Selected node. + Selected model. """ @@ -227,9 +232,9 @@ class MultiGenerateJSONIn(BaseModel): """ Maximum number of tokens to generate. """ - node: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ - Selected node. + Selected model. """ @@ -241,9 +246,9 @@ class MultiGenerateJSONOut(BaseModel): class BatchGenerateJSONIn(BaseModel): - node: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ - Selected node. + Selected model. """ prompts: List[str] """ @@ -275,6 +280,10 @@ class Mistral7BInstructIn(BaseModel): """ Input prompt. """ + system_prompt: Optional[str] = None + """ + System prompt. + """ num_choices: Annotated[int, Field(ge=1, le=8)] = 1 """ Number of choices to generate. @@ -285,7 +294,23 @@ class Mistral7BInstructIn(BaseModel): """ temperature: Annotated[Optional[float], Field(ge=0.0, le=1.0)] = None """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 0.0 + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.0 + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.1 + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: Annotated[float, Field(ge=0.0, le=1.0)] = 0.95 + """ + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -316,6 +341,10 @@ class Mixtral8x7BInstructIn(BaseModel): """ Input prompt. """ + system_prompt: Optional[str] = None + """ + System prompt. + """ num_choices: Annotated[int, Field(ge=1, le=8)] = 1 """ Number of choices to generate. @@ -326,7 +355,23 @@ class Mixtral8x7BInstructIn(BaseModel): """ temperature: Annotated[Optional[float], Field(ge=0.0, le=1.0)] = None """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. 
+ """ + frequency_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 0.0 + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.0 + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.1 + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: Annotated[float, Field(ge=0.0, le=1.0)] = 0.95 + """ + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -357,13 +402,33 @@ class Llama3Instruct8BIn(BaseModel): """ Input prompt. """ + system_prompt: Optional[str] = None + """ + System prompt. + """ num_choices: Annotated[int, Field(ge=1, le=8)] = 1 """ Number of choices to generate. """ temperature: Annotated[Optional[float], Field(ge=0.0, le=1.0)] = None """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 0.0 + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.0 + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.1 + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: Annotated[float, Field(ge=0.0, le=1.0)] = 0.95 + """ + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -398,13 +463,33 @@ class Llama3Instruct70BIn(BaseModel): """ Input prompt. """ + system_prompt: Optional[str] = None + """ + System prompt. + """ num_choices: Annotated[int, Field(ge=1, le=8)] = 1 """ Number of choices to generate. """ temperature: Annotated[Optional[float], Field(ge=0.0, le=1.0)] = None """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 0.0 + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.0 + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: Annotated[float, Field(ge=-2.0, le=2.0)] = 1.1 + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: Annotated[float, Field(ge=0.0, le=1.0)] = 0.95 + """ + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -426,28 +511,6 @@ class Llama3Instruct70BOut(BaseModel): """ -class GenerateTextVisionIn(BaseModel): - prompt: str - """ - Text prompt. - """ - image_uris: List[str] - """ - Image prompts. - """ - max_tokens: int = 800 - """ - Maximum number of tokens to generate. - """ - - -class GenerateTextVisionOut(BaseModel): - text: str - """ - Text response. - """ - - class Firellava13BIn(BaseModel): prompt: str """ @@ -457,7 +520,7 @@ class Firellava13BIn(BaseModel): """ Image prompts. """ - max_tokens: int = 800 + max_tokens: Optional[int] = None """ Maximum number of tokens to generate. 
""" @@ -701,7 +764,7 @@ class StableDiffusionXLControlNetOut(BaseModel): """ -class GenerativeEditImageIn(BaseModel): +class InpaintImageIn(BaseModel): image_uri: str """ Original image. @@ -720,14 +783,14 @@ class GenerativeEditImageIn(BaseModel): """ -class GenerativeEditImageOut(BaseModel): +class InpaintImageOut(BaseModel): image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. """ -class MultiGenerativeEditImageIn(BaseModel): +class MultiInpaintImageIn(BaseModel): image_uri: str """ Original image. @@ -750,8 +813,8 @@ class MultiGenerativeEditImageIn(BaseModel): """ -class MultiGenerativeEditImageOut(BaseModel): - outputs: List[GenerativeEditImageOut] +class MultiInpaintImageOut(BaseModel): + outputs: List[InpaintImageOut] """ Generated images. """ @@ -833,7 +896,7 @@ class Point(BaseModel): """ -class FillMaskIn(BaseModel): +class EraseImageIn(BaseModel): image_uri: str """ Input image. @@ -848,7 +911,7 @@ class FillMaskIn(BaseModel): """ -class FillMaskOut(BaseModel): +class EraseImageOut(BaseModel): image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -995,7 +1058,7 @@ class SegmentAnythingOut(BaseModel): """ -class TranscribeMediaIn(BaseModel): +class TranscribeSpeechIn(BaseModel): audio_uri: str """ Input audio. @@ -1079,7 +1142,7 @@ class ChapterMarker(BaseModel): """ -class TranscribeMediaOut(BaseModel): +class TranscribeSpeechOut(BaseModel): text: str """ Transcribed text. diff --git a/substrate/future_dataclass_models.py b/substrate/future_dataclass_models.py index 46ff94c..ecaeb38 100644 --- a/substrate/future_dataclass_models.py +++ b/substrate/future_dataclass_models.py @@ -133,6 +133,11 @@ class FutureGenerateTextIn: (Future reference) Input prompt. """ + image_uris: Optional[List[str]] = None + """ + (Future reference) + Image prompts. + """ temperature: float = 0.4 """ (Future reference) @@ -143,15 +148,16 @@ class FutureGenerateTextIn: (Future reference) Maximum number of tokens to generate. """ - node: Literal[ + model: Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", "Llama3Instruct70B", + "Firellava13B", ] = "Llama3Instruct8B" """ (Future reference) - Selected node. + Selected model. `Firellava13B` is automatically selected when `image_uris` is provided. """ @@ -194,10 +200,10 @@ class FutureGenerateJSONIn: (Future reference) Maximum number of tokens to generate. """ - node: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ (Future reference) - Selected node. + Selected model. """ @@ -245,7 +251,7 @@ class FutureMultiGenerateTextIn: (Future reference) Maximum number of tokens to generate. """ - node: Literal[ + model: Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", @@ -253,7 +259,7 @@ class FutureMultiGenerateTextIn: ] = "Llama3Instruct8B" """ (Future reference) - Selected node. + Selected model. """ @@ -337,10 +343,10 @@ class FutureMultiGenerateJSONIn: (Future reference) Maximum number of tokens to generate. """ - node: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ (Future reference) - Selected node. + Selected model. 
""" @@ -373,10 +379,10 @@ class FutureBatchGenerateJSONIn: (Future reference) JSON schema to guide `json_object` response. """ - node: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ (Future reference) - Selected node. + Selected model. """ temperature: float = 0.4 """ @@ -414,6 +420,11 @@ class FutureMistral7BInstructIn: (Future reference) Input prompt. """ + system_prompt: Optional[str] = None + """ + (Future reference) + System prompt. + """ num_choices: int = 1 """ (Future reference) @@ -427,7 +438,27 @@ class FutureMistral7BInstructIn: temperature: Optional[float] = None """ (Future reference) - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: float = 0.0 + """ + (Future reference) + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: float = 1.0 + """ + (Future reference) + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: float = 1.1 + """ + (Future reference) + Higher values increase the likelihood of new topics appearing. + """ + top_p: float = 0.95 + """ + (Future reference) + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -478,6 +509,11 @@ class FutureMixtral8x7BInstructIn: (Future reference) Input prompt. """ + system_prompt: Optional[str] = None + """ + (Future reference) + System prompt. + """ num_choices: int = 1 """ (Future reference) @@ -491,7 +527,27 @@ class FutureMixtral8x7BInstructIn: temperature: Optional[float] = None """ (Future reference) - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: float = 0.0 + """ + (Future reference) + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: float = 1.0 + """ + (Future reference) + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: float = 1.1 + """ + (Future reference) + Higher values increase the likelihood of new topics appearing. + """ + top_p: float = 0.95 + """ + (Future reference) + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -542,6 +598,11 @@ class FutureLlama3Instruct8BIn: (Future reference) Input prompt. """ + system_prompt: Optional[str] = None + """ + (Future reference) + System prompt. + """ num_choices: int = 1 """ (Future reference) @@ -550,7 +611,27 @@ class FutureLlama3Instruct8BIn: temperature: Optional[float] = None """ (Future reference) - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: float = 0.0 + """ + (Future reference) + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: float = 1.0 + """ + (Future reference) + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: float = 1.1 + """ + (Future reference) + Higher values increase the likelihood of new topics appearing. 
+ """ + top_p: float = 0.95 + """ + (Future reference) + Probability below which less likely tokens are filtered out. """ max_tokens: Optional[int] = None """ @@ -606,6 +687,11 @@ class FutureLlama3Instruct70BIn: (Future reference) Input prompt. """ + system_prompt: Optional[str] = None + """ + (Future reference) + System prompt. + """ num_choices: int = 1 """ (Future reference) @@ -614,74 +700,58 @@ class FutureLlama3Instruct70BIn: temperature: Optional[float] = None """ (Future reference) - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. """ - max_tokens: Optional[int] = None + frequency_penalty: float = 0.0 """ (Future reference) - Maximum number of tokens to generate. + Higher values decrease the likelihood of repeating previous tokens. """ - - -@dataclass -class Llama3Instruct70BChoice: + repetition_penalty: float = 1.0 """ - Future reference to Llama3Instruct70BChoice + (Future reference) + Higher values decrease the likelihood of repeated sequences. """ - - text: Optional[str] = None + presence_penalty: float = 1.1 """ (Future reference) - Text response. + Higher values increase the likelihood of new topics appearing. """ - - -@dataclass -class FutureLlama3Instruct70BOut: + top_p: float = 0.95 """ - Future reference to FutureLlama3Instruct70BOut + (Future reference) + Probability below which less likely tokens are filtered out. """ - - choices: List[Llama3Instruct70BChoice] + max_tokens: Optional[int] = None """ (Future reference) - Response choices. + Maximum number of tokens to generate. """ @dataclass -class FutureGenerateTextVisionIn: +class Llama3Instruct70BChoice: """ - Future reference to FutureGenerateTextVisionIn + Future reference to Llama3Instruct70BChoice """ - prompt: str - """ - (Future reference) - Text prompt. - """ - image_uris: List[str] - """ - (Future reference) - Image prompts. - """ - max_tokens: int = 800 + text: Optional[str] = None """ (Future reference) - Maximum number of tokens to generate. + Text response. """ @dataclass -class FutureGenerateTextVisionOut: +class FutureLlama3Instruct70BOut: """ - Future reference to FutureGenerateTextVisionOut + Future reference to FutureLlama3Instruct70BOut """ - text: str + choices: List[Llama3Instruct70BChoice] """ (Future reference) - Text response. + Response choices. """ @@ -701,7 +771,7 @@ class FutureFirellava13BIn: (Future reference) Image prompts. """ - max_tokens: int = 800 + max_tokens: Optional[int] = None """ (Future reference) Maximum number of tokens to generate. 
@@ -1066,9 +1136,9 @@ class FutureStableDiffusionXLControlNetOut: @dataclass -class FutureGenerativeEditImageIn: +class FutureInpaintImageIn: """ - Future reference to FutureGenerativeEditImageIn + Future reference to FutureInpaintImageIn """ image_uri: str @@ -1094,9 +1164,9 @@ class FutureGenerativeEditImageIn: @dataclass -class FutureGenerativeEditImageOut: +class FutureInpaintImageOut: """ - Future reference to FutureGenerativeEditImageOut + Future reference to FutureInpaintImageOut """ image_uri: str @@ -1107,9 +1177,9 @@ class FutureGenerativeEditImageOut: @dataclass -class FutureMultiGenerativeEditImageIn: +class FutureMultiInpaintImageIn: """ - Future reference to FutureMultiGenerativeEditImageIn + Future reference to FutureMultiInpaintImageIn """ image_uri: str @@ -1140,12 +1210,12 @@ class FutureMultiGenerativeEditImageIn: @dataclass -class FutureMultiGenerativeEditImageOut: +class FutureMultiInpaintImageOut: """ - Future reference to FutureMultiGenerativeEditImageOut + Future reference to FutureMultiInpaintImageOut """ - outputs: List[FutureGenerativeEditImageOut] + outputs: List[FutureInpaintImageOut] """ (Future reference) Generated images. @@ -1265,9 +1335,9 @@ class Point: @dataclass -class FutureFillMaskIn: +class FutureEraseImageIn: """ - Future reference to FutureFillMaskIn + Future reference to FutureEraseImageIn """ image_uri: str @@ -1288,9 +1358,9 @@ class FutureFillMaskIn: @dataclass -class FutureFillMaskOut: +class FutureEraseImageOut: """ - Future reference to FutureFillMaskOut + Future reference to FutureEraseImageOut """ image_uri: str @@ -1527,9 +1597,9 @@ class FutureSegmentAnythingOut: @dataclass -class FutureTranscribeMediaIn: +class FutureTranscribeSpeechIn: """ - Future reference to FutureTranscribeMediaIn + Future reference to FutureTranscribeSpeechIn """ audio_uri: str @@ -1649,9 +1719,9 @@ class ChapterMarker: @dataclass -class FutureTranscribeMediaOut: +class FutureTranscribeSpeechOut: """ - Future reference to FutureTranscribeMediaOut + Future reference to FutureTranscribeSpeechOut """ text: str diff --git a/substrate/nodes.py b/substrate/nodes.py index 25c614c..5d86915 100644 --- a/substrate/nodes.py +++ b/substrate/nodes.py @@ -10,15 +10,15 @@ from .core.models import ( CLIPOut, JinaV2Out, - XTTSV2Out, - FillMaskOut, EmbedTextOut, EmbedImageOut, + EraseImageOut, ExperimentalOut, FetchVectorsOut, Firellava13BOut, GenerateJSONOut, GenerateTextOut, + InpaintImageOut, UpscaleImageOut, DeleteVectorsOut, GenerateImageOut, @@ -27,11 +27,11 @@ MultiEmbedTextOut, MultiEmbedImageOut, SegmentAnythingOut, - TranscribeMediaOut, ListVectorStoresOut, Llama3Instruct8BOut, QueryVectorStoreOut, RemoveBackgroundOut, + TranscribeSpeechOut, BatchGenerateJSONOut, BatchGenerateTextOut, DeleteVectorStoreOut, @@ -39,13 +39,11 @@ Mistral7BInstructOut, MultiGenerateJSONOut, MultiGenerateTextOut, + MultiInpaintImageOut, SegmentUnderPointOut, - GenerateTextVisionOut, MultiGenerateImageOut, - GenerativeEditImageOut, Mixtral8x7BInstructOut, FindOrCreateVectorStoreOut, - MultiGenerativeEditImageOut, StableDiffusionXLInpaintOut, StableDiffusionXLLightningOut, StableDiffusionXLControlNetOut, @@ -54,15 +52,15 @@ from .future_dataclass_models import ( FutureCLIPOut, FutureJinaV2Out, - FutureXTTSV2Out, - FutureFillMaskOut, FutureEmbedTextOut, FutureEmbedImageOut, + FutureEraseImageOut, FutureExperimentalOut, FutureFetchVectorsOut, FutureFirellava13BOut, FutureGenerateJSONOut, FutureGenerateTextOut, + FutureInpaintImageOut, FutureUpscaleImageOut, FutureDeleteVectorsOut, 
FutureGenerateImageOut, @@ -71,11 +69,11 @@ FutureMultiEmbedTextOut, FutureMultiEmbedImageOut, FutureSegmentAnythingOut, - FutureTranscribeMediaOut, FutureListVectorStoresOut, FutureLlama3Instruct8BOut, FutureQueryVectorStoreOut, FutureRemoveBackgroundOut, + FutureTranscribeSpeechOut, FutureBatchGenerateJSONOut, FutureBatchGenerateTextOut, FutureDeleteVectorStoreOut, @@ -83,13 +81,11 @@ FutureMistral7BInstructOut, FutureMultiGenerateJSONOut, FutureMultiGenerateTextOut, + FutureMultiInpaintImageOut, FutureSegmentUnderPointOut, - FutureGenerateTextVisionOut, FutureMultiGenerateImageOut, - FutureGenerativeEditImageOut, FutureMixtral8x7BInstructOut, FutureFindOrCreateVectorStoreOut, - FutureMultiGenerativeEditImageOut, FutureStableDiffusionXLInpaintOut, FutureStableDiffusionXLLightningOut, FutureStableDiffusionXLControlNetOut, @@ -127,30 +123,34 @@ class GenerateText(CoreNode[GenerateTextOut]): def __init__( self, prompt: str, + image_uris: Optional[List[str]] = None, temperature: float = 0.4, max_tokens: Optional[int] = None, - node: Literal[ + model: Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", "Llama3Instruct70B", + "Firellava13B", ] = "Llama3Instruct8B", hide: bool = False, ): """ Args: prompt: Input prompt. + image_uris: Image prompts. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. - node: Selected node. + model: Selected model. `Firellava13B` is automatically selected when `image_uris` is provided. https://substrate.run/nodes#GenerateText """ super().__init__( prompt=prompt, + image_uris=image_uris, temperature=temperature, max_tokens=max_tokens, - node=node, + model=model, hide=hide, out_type=GenerateTextOut, ) @@ -175,7 +175,7 @@ def __init__( json_schema: Dict[str, Any], temperature: float = 0.4, max_tokens: Optional[int] = None, - node: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", + model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", hide: bool = False, ): """ @@ -184,7 +184,7 @@ def __init__( json_schema: JSON schema to guide `json_object` response. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. - node: Selected node. + model: Selected model. https://substrate.run/nodes#GenerateJSON """ @@ -193,7 +193,7 @@ def __init__( json_schema=json_schema, temperature=temperature, max_tokens=max_tokens, - node=node, + model=model, hide=hide, out_type=GenerateJSONOut, ) @@ -218,7 +218,7 @@ def __init__( num_choices: int, temperature: float = 0.4, max_tokens: Optional[int] = None, - node: Literal[ + model: Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", @@ -232,7 +232,7 @@ def __init__( num_choices: Number of choices to generate. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. - node: Selected node. + model: Selected model. 
https://substrate.run/nodes#MultiGenerateText """ @@ -241,7 +241,7 @@ def __init__( num_choices=num_choices, temperature=temperature, max_tokens=max_tokens, - node=node, + model=model, hide=hide, out_type=MultiGenerateTextOut, ) @@ -304,7 +304,7 @@ def __init__( num_choices: int, temperature: float = 0.4, max_tokens: Optional[int] = None, - node: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", + model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", hide: bool = False, ): """ @@ -314,7 +314,7 @@ def __init__( num_choices: Number of choices to generate. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. - node: Selected node. + model: Selected model. https://substrate.run/nodes#MultiGenerateJSON """ @@ -324,7 +324,7 @@ def __init__( num_choices=num_choices, temperature=temperature, max_tokens=max_tokens, - node=node, + model=model, hide=hide, out_type=MultiGenerateJSONOut, ) @@ -347,7 +347,7 @@ def __init__( self, prompts: List[str], json_schema: Dict[str, Any], - node: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", temperature: float = 0.4, max_tokens: Optional[int] = None, hide: bool = False, @@ -356,7 +356,7 @@ def __init__( Args: prompts: Batch input prompts. json_schema: JSON schema to guide `json_object` response. - node: Selected node. + model: Selected model. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. @@ -365,7 +365,7 @@ def __init__( super().__init__( prompts=prompts, json_schema=json_schema, - node=node, + model=model, temperature=temperature, max_tokens=max_tokens, hide=hide, @@ -389,27 +389,42 @@ class Mistral7BInstruct(CoreNode[Mistral7BInstructOut]): def __init__( self, prompt: str, + system_prompt: Optional[str] = None, num_choices: int = 1, json_schema: Optional[Dict[str, Any]] = None, temperature: Optional[float] = None, + frequency_penalty: float = 0.0, + repetition_penalty: float = 1.0, + presence_penalty: float = 1.1, + top_p: float = 0.95, max_tokens: Optional[int] = None, hide: bool = False, ): """ Args: prompt: Input prompt. + system_prompt: System prompt. num_choices: Number of choices to generate. json_schema: JSON schema to guide response. - temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + temperature: Higher values make the output more random, lower values make the output more deterministic. + frequency_penalty: Higher values decrease the likelihood of repeating previous tokens. + repetition_penalty: Higher values decrease the likelihood of repeated sequences. + presence_penalty: Higher values increase the likelihood of new topics appearing. + top_p: Probability below which less likely tokens are filtered out. max_tokens: Maximum number of tokens to generate. 
https://substrate.run/nodes#Mistral7BInstruct """ super().__init__( prompt=prompt, + system_prompt=system_prompt, num_choices=num_choices, json_schema=json_schema, temperature=temperature, + frequency_penalty=frequency_penalty, + repetition_penalty=repetition_penalty, + presence_penalty=presence_penalty, + top_p=top_p, max_tokens=max_tokens, hide=hide, out_type=Mistral7BInstructOut, @@ -432,27 +447,42 @@ class Mixtral8x7BInstruct(CoreNode[Mixtral8x7BInstructOut]): def __init__( self, prompt: str, + system_prompt: Optional[str] = None, num_choices: int = 1, json_schema: Optional[Dict[str, Any]] = None, temperature: Optional[float] = None, + frequency_penalty: float = 0.0, + repetition_penalty: float = 1.0, + presence_penalty: float = 1.1, + top_p: float = 0.95, max_tokens: Optional[int] = None, hide: bool = False, ): """ Args: prompt: Input prompt. + system_prompt: System prompt. num_choices: Number of choices to generate. json_schema: JSON schema to guide response. - temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + temperature: Higher values make the output more random, lower values make the output more deterministic. + frequency_penalty: Higher values decrease the likelihood of repeating previous tokens. + repetition_penalty: Higher values decrease the likelihood of repeated sequences. + presence_penalty: Higher values increase the likelihood of new topics appearing. + top_p: Probability below which less likely tokens are filtered out. max_tokens: Maximum number of tokens to generate. https://substrate.run/nodes#Mixtral8x7BInstruct """ super().__init__( prompt=prompt, + system_prompt=system_prompt, num_choices=num_choices, json_schema=json_schema, temperature=temperature, + frequency_penalty=frequency_penalty, + repetition_penalty=repetition_penalty, + presence_penalty=presence_penalty, + top_p=top_p, max_tokens=max_tokens, hide=hide, out_type=Mixtral8x7BInstructOut, @@ -475,8 +505,13 @@ class Llama3Instruct8B(CoreNode[Llama3Instruct8BOut]): def __init__( self, prompt: str, + system_prompt: Optional[str] = None, num_choices: int = 1, temperature: Optional[float] = None, + frequency_penalty: float = 0.0, + repetition_penalty: float = 1.0, + presence_penalty: float = 1.1, + top_p: float = 0.95, max_tokens: Optional[int] = None, json_schema: Optional[Dict[str, Any]] = None, hide: bool = False, @@ -484,8 +519,13 @@ def __init__( """ Args: prompt: Input prompt. + system_prompt: System prompt. num_choices: Number of choices to generate. - temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + temperature: Higher values make the output more random, lower values make the output more deterministic. + frequency_penalty: Higher values decrease the likelihood of repeating previous tokens. + repetition_penalty: Higher values decrease the likelihood of repeated sequences. + presence_penalty: Higher values increase the likelihood of new topics appearing. + top_p: Probability below which less likely tokens are filtered out. max_tokens: Maximum number of tokens to generate. json_schema: JSON schema to guide response. 
@@ -493,8 +533,13 @@ def __init__( """ super().__init__( prompt=prompt, + system_prompt=system_prompt, num_choices=num_choices, temperature=temperature, + frequency_penalty=frequency_penalty, + repetition_penalty=repetition_penalty, + presence_penalty=presence_penalty, + top_p=top_p, max_tokens=max_tokens, json_schema=json_schema, hide=hide, @@ -518,24 +563,39 @@ class Llama3Instruct70B(CoreNode[Llama3Instruct70BOut]): def __init__( self, prompt: str, + system_prompt: Optional[str] = None, num_choices: int = 1, temperature: Optional[float] = None, + frequency_penalty: float = 0.0, + repetition_penalty: float = 1.0, + presence_penalty: float = 1.1, + top_p: float = 0.95, max_tokens: Optional[int] = None, hide: bool = False, ): """ Args: prompt: Input prompt. + system_prompt: System prompt. num_choices: Number of choices to generate. - temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + temperature: Higher values make the output more random, lower values make the output more deterministic. + frequency_penalty: Higher values decrease the likelihood of repeating previous tokens. + repetition_penalty: Higher values decrease the likelihood of repeated sequences. + presence_penalty: Higher values increase the likelihood of new topics appearing. + top_p: Probability below which less likely tokens are filtered out. max_tokens: Maximum number of tokens to generate. https://substrate.run/nodes#Llama3Instruct70B """ super().__init__( prompt=prompt, + system_prompt=system_prompt, num_choices=num_choices, temperature=temperature, + frequency_penalty=frequency_penalty, + repetition_penalty=repetition_penalty, + presence_penalty=presence_penalty, + top_p=top_p, max_tokens=max_tokens, hide=hide, out_type=Llama3Instruct70BOut, @@ -552,43 +612,6 @@ def future(self) -> FutureLlama3Instruct70BOut: # type: ignore return super().future # type: ignore -class GenerateTextVision(CoreNode[GenerateTextVisionOut]): - """https://substrate.run/nodes#GenerateTextVision""" - - def __init__( - self, - prompt: str, - image_uris: List[str], - max_tokens: int = 800, - hide: bool = False, - ): - """ - Args: - prompt: Text prompt. - image_uris: Image prompts. - max_tokens: Maximum number of tokens to generate. - - https://substrate.run/nodes#GenerateTextVision - """ - super().__init__( - prompt=prompt, - image_uris=image_uris, - max_tokens=max_tokens, - hide=hide, - out_type=GenerateTextVisionOut, - ) - self.node = "GenerateTextVision" - - @property - def future(self) -> FutureGenerateTextVisionOut: # type: ignore - """ - Future reference to this node's output. - - https://substrate.run/nodes#GenerateTextVision - """ - return super().future # type: ignore - - class Firellava13B(CoreNode[Firellava13BOut]): """https://substrate.run/nodes#Firellava13B""" @@ -596,7 +619,7 @@ def __init__( self, prompt: str, image_uris: List[str], - max_tokens: int = 800, + max_tokens: Optional[int] = None, hide: bool = False, ): """ @@ -794,8 +817,8 @@ def future(self) -> FutureStableDiffusionXLControlNetOut: # type: ignore return super().future # type: ignore -class GenerativeEditImage(CoreNode[GenerativeEditImageOut]): - """https://substrate.run/nodes#GenerativeEditImage""" +class InpaintImage(CoreNode[InpaintImageOut]): + """https://substrate.run/nodes#InpaintImage""" def __init__( self, @@ -812,7 +835,7 @@ def __init__( mask_image_uri: Mask image that controls which pixels are inpainted. If unset, the entire image is edited (image-to-image). 
store: Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://guides.substrate.run/guides/external-file-storage). If unset, the image data will be returned as a base64-encoded string. - https://substrate.run/nodes#GenerativeEditImage + https://substrate.run/nodes#InpaintImage """ super().__init__( image_uri=image_uri, @@ -820,22 +843,22 @@ def __init__( mask_image_uri=mask_image_uri, store=store, hide=hide, - out_type=GenerativeEditImageOut, + out_type=InpaintImageOut, ) - self.node = "GenerativeEditImage" + self.node = "InpaintImage" @property - def future(self) -> FutureGenerativeEditImageOut: # type: ignore + def future(self) -> FutureInpaintImageOut: # type: ignore """ Future reference to this node's output. - https://substrate.run/nodes#GenerativeEditImage + https://substrate.run/nodes#InpaintImage """ return super().future # type: ignore -class MultiGenerativeEditImage(CoreNode[MultiGenerativeEditImageOut]): - """https://substrate.run/nodes#MultiGenerativeEditImage""" +class MultiInpaintImage(CoreNode[MultiInpaintImageOut]): + """https://substrate.run/nodes#MultiInpaintImage""" def __init__( self, @@ -854,7 +877,7 @@ def __init__( mask_image_uri: Mask image that controls which pixels are edited (inpainting). If unset, the entire image is edited (image-to-image). store: Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://guides.substrate.run/guides/external-file-storage). If unset, the image data will be returned as a base64-encoded string. - https://substrate.run/nodes#MultiGenerativeEditImage + https://substrate.run/nodes#MultiInpaintImage """ super().__init__( image_uri=image_uri, @@ -863,16 +886,16 @@ def __init__( mask_image_uri=mask_image_uri, store=store, hide=hide, - out_type=MultiGenerativeEditImageOut, + out_type=MultiInpaintImageOut, ) - self.node = "MultiGenerativeEditImage" + self.node = "MultiInpaintImage" @property - def future(self) -> FutureMultiGenerativeEditImageOut: # type: ignore + def future(self) -> FutureMultiInpaintImageOut: # type: ignore """ Future reference to this node's output. - https://substrate.run/nodes#MultiGenerativeEditImage + https://substrate.run/nodes#MultiInpaintImage """ return super().future # type: ignore @@ -932,8 +955,8 @@ def future(self) -> FutureStableDiffusionXLInpaintOut: # type: ignore return super().future # type: ignore -class FillMask(CoreNode[FillMaskOut]): - """https://substrate.run/nodes#FillMask""" +class EraseImage(CoreNode[EraseImageOut]): + """https://substrate.run/nodes#EraseImage""" def __init__( self, @@ -948,23 +971,23 @@ def __init__( mask_image_uri: Mask image that controls which pixels are inpainted. store: Use "hosted" to return an image URL hosted on Substrate. You can also provide a URL to a registered [file store](https://guides.substrate.run/guides/external-file-storage). If unset, the image data will be returned as a base64-encoded string. - https://substrate.run/nodes#FillMask + https://substrate.run/nodes#EraseImage """ super().__init__( image_uri=image_uri, mask_image_uri=mask_image_uri, store=store, hide=hide, - out_type=FillMaskOut, + out_type=EraseImageOut, ) - self.node = "FillMask" + self.node = "EraseImage" @property - def future(self) -> FutureFillMaskOut: # type: ignore + def future(self) -> FutureEraseImageOut: # type: ignore """ Future reference to this node's output. 
- https://substrate.run/nodes#FillMask + https://substrate.run/nodes#EraseImage """ return super().future # type: ignore @@ -1126,8 +1149,8 @@ def future(self) -> FutureSegmentAnythingOut: # type: ignore return super().future # type: ignore -class TranscribeMedia(CoreNode[TranscribeMediaOut]): - """https://substrate.run/nodes#TranscribeMedia""" +class TranscribeSpeech(CoreNode[TranscribeSpeechOut]): + """https://substrate.run/nodes#TranscribeSpeech""" def __init__( self, @@ -1150,7 +1173,7 @@ def __init__( diarize: Identify speakers for each segment. Speaker IDs will be included in each segment. suggest_chapters: Suggest automatic chapter markers. - https://substrate.run/nodes#TranscribeMedia + https://substrate.run/nodes#TranscribeSpeech """ super().__init__( audio_uri=audio_uri, @@ -1161,16 +1184,16 @@ def __init__( diarize=diarize, suggest_chapters=suggest_chapters, hide=hide, - out_type=TranscribeMediaOut, + out_type=TranscribeSpeechOut, ) - self.node = "TranscribeMedia" + self.node = "TranscribeSpeech" @property - def future(self) -> FutureTranscribeMediaOut: # type: ignore + def future(self) -> FutureTranscribeSpeechOut: # type: ignore """ Future reference to this node's output. - https://substrate.run/nodes#TranscribeMedia + https://substrate.run/nodes#TranscribeSpeech """ return super().future # type: ignore @@ -1199,46 +1222,6 @@ def future(self) -> FutureGenerateSpeechOut: # type: ignore return super().future # type: ignore -class XTTSV2(CoreNode[XTTSV2Out]): - """https://substrate.run/nodes#XTTSV2""" - - def __init__( - self, - text: str, - audio_uri: Optional[str] = None, - language: str = "en", - store: Optional[str] = None, - hide: bool = False, - ): - """ - Args: - text: Input text. - audio_uri: Reference audio used to synthesize the speaker. If unset, a default speaker voice will be used. - language: Language of input text. Supported languages: `en, de, fr, es, it, pt, pl, zh, ar, cs, ru, nl, tr, hu, ko`. - store: Use "hosted" to return an audio URL hosted on Substrate. You can also provide a URL to a registered [file store](https://guides.substrate.run/guides/external-file-storage). If unset, the audio data will be returned as a base64-encoded string. - - https://substrate.run/nodes#XTTSV2 - """ - super().__init__( - text=text, - audio_uri=audio_uri, - language=language, - store=store, - hide=hide, - out_type=XTTSV2Out, - ) - self.node = "XTTSV2" - - @property - def future(self) -> FutureXTTSV2Out: # type: ignore - """ - Future reference to this node's output. - - https://substrate.run/nodes#XTTSV2 - """ - return super().future # type: ignore - - class EmbedText(CoreNode[EmbedTextOut]): """https://substrate.run/nodes#EmbedText""" diff --git a/substrate/typeddict_models.py b/substrate/typeddict_models.py index 7431184..a40a9a7 100644 --- a/substrate/typeddict_models.py +++ b/substrate/typeddict_models.py @@ -87,6 +87,10 @@ class GenerateTextIn(TypedDict): """ Input prompt. """ + image_uris: NotRequired[List[str]] + """ + Image prompts. + """ temperature: NotRequired[float] """ Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. @@ -95,16 +99,17 @@ class GenerateTextIn(TypedDict): """ Maximum number of tokens to generate. """ - node: NotRequired[ + model: NotRequired[ Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B", "Llama3Instruct70B", + "Firellava13B", ] ] """ - Selected node. + Selected model. `Firellava13B` is automatically selected when `image_uris` is provided. 
""" @@ -132,9 +137,9 @@ class GenerateJSONIn(TypedDict): """ Maximum number of tokens to generate. """ - node: NotRequired[Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"]] + model: NotRequired[Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"]] """ - Selected node. + Selected model. """ @@ -166,7 +171,7 @@ class MultiGenerateTextIn(TypedDict): """ Maximum number of tokens to generate. """ - node: NotRequired[ + model: NotRequired[ Literal[ "Mistral7BInstruct", "Mixtral8x7BInstruct", @@ -175,7 +180,7 @@ class MultiGenerateTextIn(TypedDict): ] ] """ - Selected node. + Selected model. """ @@ -229,9 +234,9 @@ class MultiGenerateJSONIn(TypedDict): """ Maximum number of tokens to generate. """ - node: NotRequired[Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"]] + model: NotRequired[Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"]] """ - Selected node. + Selected model. """ @@ -243,9 +248,9 @@ class MultiGenerateJSONOut(TypedDict): class BatchGenerateJSONIn(TypedDict): - node: NotRequired[Literal["Mistral7BInstruct", "Llama3Instruct8B"]] + model: NotRequired[Literal["Mistral7BInstruct", "Llama3Instruct8B"]] """ - Selected node. + Selected model. """ prompts: NotRequired[List[str]] """ @@ -277,6 +282,10 @@ class Mistral7BInstructIn(TypedDict): """ Input prompt. """ + system_prompt: NotRequired[str] + """ + System prompt. + """ num_choices: NotRequired[int] """ Number of choices to generate. @@ -287,7 +296,23 @@ class Mistral7BInstructIn(TypedDict): """ temperature: NotRequired[float] """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: NotRequired[float] + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: NotRequired[float] + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: NotRequired[float] + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: NotRequired[float] + """ + Probability below which less likely tokens are filtered out. """ max_tokens: NotRequired[int] """ @@ -318,6 +343,10 @@ class Mixtral8x7BInstructIn(TypedDict): """ Input prompt. """ + system_prompt: NotRequired[str] + """ + System prompt. + """ num_choices: NotRequired[int] """ Number of choices to generate. @@ -328,7 +357,23 @@ class Mixtral8x7BInstructIn(TypedDict): """ temperature: NotRequired[float] """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: NotRequired[float] + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: NotRequired[float] + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: NotRequired[float] + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: NotRequired[float] + """ + Probability below which less likely tokens are filtered out. """ max_tokens: NotRequired[int] """ @@ -359,13 +404,33 @@ class Llama3Instruct8BIn(TypedDict): """ Input prompt. """ + system_prompt: NotRequired[str] + """ + System prompt. + """ num_choices: NotRequired[int] """ Number of choices to generate. 
""" temperature: NotRequired[float] """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. + """ + frequency_penalty: NotRequired[float] + """ + Higher values decrease the likelihood of repeating previous tokens. + """ + repetition_penalty: NotRequired[float] + """ + Higher values decrease the likelihood of repeated sequences. + """ + presence_penalty: NotRequired[float] + """ + Higher values increase the likelihood of new topics appearing. + """ + top_p: NotRequired[float] + """ + Probability below which less likely tokens are filtered out. """ max_tokens: NotRequired[int] """ @@ -400,42 +465,33 @@ class Llama3Instruct70BIn(TypedDict): """ Input prompt. """ + system_prompt: NotRequired[str] + """ + System prompt. + """ num_choices: NotRequired[int] """ Number of choices to generate. """ temperature: NotRequired[float] """ - Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. + Higher values make the output more random, lower values make the output more deterministic. """ - max_tokens: NotRequired[int] + frequency_penalty: NotRequired[float] """ - Maximum number of tokens to generate. + Higher values decrease the likelihood of repeating previous tokens. """ - - -class Llama3Instruct70BChoice(TypedDict): - text: NotRequired[str] - """ - Text response. - """ - - -class Llama3Instruct70BOut(TypedDict): - choices: NotRequired[List[Llama3Instruct70BChoice]] + repetition_penalty: NotRequired[float] """ - Response choices. + Higher values decrease the likelihood of repeated sequences. """ - - -class GenerateTextVisionIn(TypedDict): - prompt: NotRequired[str] + presence_penalty: NotRequired[float] """ - Text prompt. + Higher values increase the likelihood of new topics appearing. """ - image_uris: NotRequired[List[str]] + top_p: NotRequired[float] """ - Image prompts. + Probability below which less likely tokens are filtered out. """ max_tokens: NotRequired[int] """ @@ -443,13 +499,20 @@ class GenerateTextVisionIn(TypedDict): """ -class GenerateTextVisionOut(TypedDict): +class Llama3Instruct70BChoice(TypedDict): text: NotRequired[str] """ Text response. """ +class Llama3Instruct70BOut(TypedDict): + choices: NotRequired[List[Llama3Instruct70BChoice]] + """ + Response choices. + """ + + class Firellava13BIn(TypedDict): prompt: NotRequired[str] """ @@ -703,7 +766,7 @@ class StableDiffusionXLControlNetOut(TypedDict): """ -class GenerativeEditImageIn(TypedDict): +class InpaintImageIn(TypedDict): image_uri: NotRequired[str] """ Original image. @@ -722,14 +785,14 @@ class GenerativeEditImageIn(TypedDict): """ -class GenerativeEditImageOut(TypedDict): +class InpaintImageOut(TypedDict): image_uri: NotRequired[str] """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. """ -class MultiGenerativeEditImageIn(TypedDict): +class MultiInpaintImageIn(TypedDict): image_uri: NotRequired[str] """ Original image. @@ -752,8 +815,8 @@ class MultiGenerativeEditImageIn(TypedDict): """ -class MultiGenerativeEditImageOut(TypedDict): - outputs: NotRequired[List[GenerativeEditImageOut]] +class MultiInpaintImageOut(TypedDict): + outputs: NotRequired[List[InpaintImageOut]] """ Generated images. """ @@ -835,7 +898,7 @@ class Point(TypedDict): """ -class FillMaskIn(TypedDict): +class EraseImageIn(TypedDict): image_uri: NotRequired[str] """ Input image. 
@@ -850,7 +913,7 @@ class FillMaskIn(TypedDict): """ -class FillMaskOut(TypedDict): +class EraseImageOut(TypedDict): image_uri: NotRequired[str] """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -997,7 +1060,7 @@ class SegmentAnythingOut(TypedDict): """ -class TranscribeMediaIn(TypedDict): +class TranscribeSpeechIn(TypedDict): audio_uri: NotRequired[str] """ Input audio. @@ -1081,7 +1144,7 @@ class ChapterMarker(TypedDict): """ -class TranscribeMediaOut(TypedDict): +class TranscribeSpeechOut(TypedDict): text: NotRequired[str] """ Transcribed text. From e8a2d23f03985abb0508d474b4c132cdb1c90067 Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 11:22:58 -0400 Subject: [PATCH 2/7] wip --- examples/basic.py | 7 +- substrate/core/models.py | 331 ++++++++++++++++++++++++++- substrate/future_dataclass_models.py | 5 +- substrate/nodes.py | 100 +++++++- substrate/typeddict_models.py | 5 +- 5 files changed, 427 insertions(+), 21 deletions(-) diff --git a/examples/basic.py b/examples/basic.py index 1204e5b..62aa40b 100755 --- a/examples/basic.py +++ b/examples/basic.py @@ -10,14 +10,15 @@ if api_key is None: raise EnvironmentError("No SUBSTRATE_API_KEY set") -from substrate import Substrate, GenerateText, sb +from substrate import Substrate, GenerateText substrate = Substrate(api_key=api_key, timeout=60 * 5) story = GenerateText(prompt="tell me a story") -summary = GenerateText(prompt=sb.concat("Summarize this story: ", story.future.text)) +# summary = GenerateText(prompt=sb.concat("Summarize this story: ", story.future.text)) -response = substrate.run(story, summary) +# response = substrate.run(story, summary) +response = substrate.run(story) print(response.api_response.json) print(response.api_response.status_code) diff --git a/substrate/core/models.py b/substrate/core/models.py index 83afea1..a1fee99 100644 --- a/substrate/core/models.py +++ b/substrate/core/models.py @@ -1,7 +1,6 @@ """ -𐃏 Substrate -@generated file -(using datamodel-codegen) +֍ Substrate +generated file """ @@ -10,10 +9,13 @@ from typing import Any, Dict, List, Optional from typing_extensions import Literal, Annotated -from pydantic import Field, BaseModel +from pydantic import Extra, Field, BaseModel class ErrorOut(BaseModel): + class Config: + extra = Extra.allow + type: Literal["api_error", "invalid_request_error"] """ The type of error returned. @@ -25,6 +27,9 @@ class ErrorOut(BaseModel): class ExperimentalIn(BaseModel): + class Config: + extra = Extra.allow + name: str """ Identifier. @@ -40,6 +45,9 @@ class ExperimentalIn(BaseModel): class ExperimentalOut(BaseModel): + class Config: + extra = Extra.allow + output: Dict[str, Any] """ Response. @@ -47,6 +55,9 @@ class ExperimentalOut(BaseModel): class RunPythonIn(BaseModel): + class Config: + extra = Extra.allow + pkl_function: Optional[str] = None """ Pickled function. @@ -66,6 +77,9 @@ class RunPythonIn(BaseModel): class RunPythonOut(BaseModel): + class Config: + extra = Extra.allow + output: Optional[Any] = None """ Return value of your function. @@ -85,6 +99,9 @@ class RunPythonOut(BaseModel): class GenerateTextIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -114,6 +131,9 @@ class GenerateTextIn(BaseModel): class GenerateTextOut(BaseModel): + class Config: + extra = Extra.allow + text: str """ Text response. @@ -121,6 +141,9 @@ class GenerateTextOut(BaseModel): class GenerateJSONIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. 
@@ -144,6 +167,9 @@ class GenerateJSONIn(BaseModel): class GenerateJSONOut(BaseModel): + class Config: + extra = Extra.allow + json_object: Optional[Dict[str, Any]] = None """ JSON response. @@ -155,6 +181,9 @@ class GenerateJSONOut(BaseModel): class MultiGenerateTextIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -183,6 +212,9 @@ class MultiGenerateTextIn(BaseModel): class MultiGenerateTextOut(BaseModel): + class Config: + extra = Extra.allow + choices: List[GenerateTextOut] """ Response choices. @@ -190,6 +222,9 @@ class MultiGenerateTextOut(BaseModel): class BatchGenerateTextIn(BaseModel): + class Config: + extra = Extra.allow + prompts: List[str] """ Batch input prompts. @@ -205,6 +240,9 @@ class BatchGenerateTextIn(BaseModel): class BatchGenerateTextOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[GenerateTextOut] """ Batch outputs. @@ -212,6 +250,9 @@ class BatchGenerateTextOut(BaseModel): class MultiGenerateJSONIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -239,6 +280,9 @@ class MultiGenerateJSONIn(BaseModel): class MultiGenerateJSONOut(BaseModel): + class Config: + extra = Extra.allow + choices: List[GenerateJSONOut] """ Response choices. @@ -246,6 +290,9 @@ class MultiGenerateJSONOut(BaseModel): class BatchGenerateJSONIn(BaseModel): + class Config: + extra = Extra.allow + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" """ Selected model. @@ -269,6 +316,9 @@ class BatchGenerateJSONIn(BaseModel): class BatchGenerateJSONOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[GenerateJSONOut] """ Batch outputs. @@ -276,6 +326,9 @@ class BatchGenerateJSONOut(BaseModel): class Mistral7BInstructIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -319,6 +372,9 @@ class Mistral7BInstructIn(BaseModel): class Mistral7BInstructChoice(BaseModel): + class Config: + extra = Extra.allow + text: Optional[str] = None """ Text response, if `json_schema` was not provided. @@ -330,6 +386,9 @@ class Mistral7BInstructChoice(BaseModel): class Mistral7BInstructOut(BaseModel): + class Config: + extra = Extra.allow + choices: List[Mistral7BInstructChoice] """ Response choices. @@ -337,6 +396,9 @@ class Mistral7BInstructOut(BaseModel): class Mixtral8x7BInstructIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -380,6 +442,9 @@ class Mixtral8x7BInstructIn(BaseModel): class Mixtral8x7BChoice(BaseModel): + class Config: + extra = Extra.allow + text: Optional[str] = None """ Text response, if `json_schema` was not provided. @@ -391,6 +456,9 @@ class Mixtral8x7BChoice(BaseModel): class Mixtral8x7BInstructOut(BaseModel): + class Config: + extra = Extra.allow + choices: List[Mixtral8x7BChoice] """ Response choices. @@ -398,6 +466,9 @@ class Mixtral8x7BInstructOut(BaseModel): class Llama3Instruct8BIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -441,6 +512,9 @@ class Llama3Instruct8BIn(BaseModel): class Llama3Instruct8BChoice(BaseModel): + class Config: + extra = Extra.allow + text: Optional[str] = None """ Text response. @@ -452,6 +526,9 @@ class Llama3Instruct8BChoice(BaseModel): class Llama3Instruct8BOut(BaseModel): + class Config: + extra = Extra.allow + choices: List[Llama3Instruct8BChoice] """ Response choices. 
@@ -459,6 +536,9 @@ class Llama3Instruct8BOut(BaseModel): class Llama3Instruct70BIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Input prompt. @@ -498,6 +578,9 @@ class Llama3Instruct70BIn(BaseModel): class Llama3Instruct70BChoice(BaseModel): + class Config: + extra = Extra.allow + text: Optional[str] = None """ Text response. @@ -505,6 +588,9 @@ class Llama3Instruct70BChoice(BaseModel): class Llama3Instruct70BOut(BaseModel): + class Config: + extra = Extra.allow + choices: List[Llama3Instruct70BChoice] """ Response choices. @@ -512,6 +598,9 @@ class Llama3Instruct70BOut(BaseModel): class Firellava13BIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Text prompt. @@ -527,6 +616,9 @@ class Firellava13BIn(BaseModel): class Firellava13BOut(BaseModel): + class Config: + extra = Extra.allow + text: str """ Text response. @@ -534,6 +626,9 @@ class Firellava13BOut(BaseModel): class GenerateImageIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Text prompt. @@ -545,6 +640,9 @@ class GenerateImageIn(BaseModel): class GenerateImageOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -552,6 +650,9 @@ class GenerateImageOut(BaseModel): class MultiGenerateImageIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Text prompt. @@ -567,6 +668,9 @@ class MultiGenerateImageIn(BaseModel): class MultiGenerateImageOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[GenerateImageOut] """ Generated images. @@ -574,6 +678,9 @@ class MultiGenerateImageOut(BaseModel): class StableDiffusionXLIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Text prompt. @@ -613,6 +720,9 @@ class StableDiffusionXLIn(BaseModel): class StableDiffusionImage(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -624,6 +734,9 @@ class StableDiffusionImage(BaseModel): class StableDiffusionXLOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[StableDiffusionImage] """ Generated images. @@ -631,6 +744,9 @@ class StableDiffusionXLOut(BaseModel): class StableDiffusionXLLightningIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Text prompt. @@ -662,6 +778,9 @@ class StableDiffusionXLLightningIn(BaseModel): class StableDiffusionXLLightningOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[StableDiffusionImage] """ Generated images. @@ -669,6 +788,9 @@ class StableDiffusionXLLightningOut(BaseModel): class StableDiffusionXLIPAdapterIn(BaseModel): + class Config: + extra = Extra.allow + prompt: str """ Text prompt. @@ -708,6 +830,9 @@ class StableDiffusionXLIPAdapterIn(BaseModel): class StableDiffusionXLIPAdapterOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[StableDiffusionImage] """ Generated images. @@ -715,6 +840,9 @@ class StableDiffusionXLIPAdapterOut(BaseModel): class StableDiffusionXLControlNetIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -758,6 +886,9 @@ class StableDiffusionXLControlNetIn(BaseModel): class StableDiffusionXLControlNetOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[StableDiffusionImage] """ Generated images. 
@@ -765,6 +896,9 @@ class StableDiffusionXLControlNetOut(BaseModel): class InpaintImageIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Original image. @@ -784,6 +918,9 @@ class InpaintImageIn(BaseModel): class InpaintImageOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -791,6 +928,9 @@ class InpaintImageOut(BaseModel): class MultiInpaintImageIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Original image. @@ -814,6 +954,9 @@ class MultiInpaintImageIn(BaseModel): class MultiInpaintImageOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[InpaintImageOut] """ Generated images. @@ -821,6 +964,9 @@ class MultiInpaintImageOut(BaseModel): class StableDiffusionXLInpaintIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Original image. @@ -860,6 +1006,9 @@ class StableDiffusionXLInpaintIn(BaseModel): class StableDiffusionXLInpaintOut(BaseModel): + class Config: + extra = Extra.allow + outputs: List[StableDiffusionImage] """ Generated images. @@ -867,6 +1016,9 @@ class StableDiffusionXLInpaintOut(BaseModel): class BoundingBox(BaseModel): + class Config: + extra = Extra.allow + x1: float """ Top left corner x. @@ -886,6 +1038,9 @@ class BoundingBox(BaseModel): class Point(BaseModel): + class Config: + extra = Extra.allow + x: int """ X position. @@ -897,6 +1052,9 @@ class Point(BaseModel): class EraseImageIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -912,6 +1070,9 @@ class EraseImageIn(BaseModel): class EraseImageOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -919,6 +1080,9 @@ class EraseImageOut(BaseModel): class BigLaMaIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -934,6 +1098,9 @@ class BigLaMaIn(BaseModel): class BigLaMaOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -941,6 +1108,9 @@ class BigLaMaOut(BaseModel): class RemoveBackgroundIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -960,6 +1130,9 @@ class RemoveBackgroundIn(BaseModel): class RemoveBackgroundOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -967,6 +1140,9 @@ class RemoveBackgroundOut(BaseModel): class DISISNetIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -978,6 +1154,9 @@ class DISISNetIn(BaseModel): class DISISNetOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -985,6 +1164,9 @@ class DISISNetOut(BaseModel): class UpscaleImageIn(BaseModel): + class Config: + extra = Extra.allow + prompt: Optional[str] = None """ Prompt to guide model on the content of image to upscale. @@ -1004,6 +1186,9 @@ class UpscaleImageIn(BaseModel): class UpscaleImageOut(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. 
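The `image_uri` docstring repeated across these image models (base64 JPEG bytes by default, a hosted URL when `store` is given) is the one behavioral switch they all share. A hedged sketch of the two modes: `input_image_uri` is a placeholder, and the "hosted" destination string is an assumption for illustration, not something this diff confirms.

    # No store destination: the output's image_uri holds
    # base64-encoded JPEG bytes inline.
    inline_upscale = UpscaleImage(image_uri=input_image_uri)

    # With a store destination, image_uri comes back as a URL instead.
    hosted_upscale = UpscaleImage(image_uri=input_image_uri, store="hosted")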
@@ -1011,6 +1196,9 @@ class UpscaleImageOut(BaseModel): class SegmentUnderPointIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -1026,6 +1214,9 @@ class SegmentUnderPointIn(BaseModel): class SegmentUnderPointOut(BaseModel): + class Config: + extra = Extra.allow + mask_image_uri: str """ Detected segments in 'mask image' format. Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -1033,6 +1224,9 @@ class SegmentUnderPointOut(BaseModel): class SegmentAnythingIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Input image. @@ -1052,6 +1246,9 @@ class SegmentAnythingIn(BaseModel): class SegmentAnythingOut(BaseModel): + class Config: + extra = Extra.allow + mask_image_uri: str """ Detected segments in 'mask image' format. Base 64-encoded JPEG image bytes, or a hosted image url if `store` is provided. @@ -1059,6 +1256,9 @@ class SegmentAnythingOut(BaseModel): class TranscribeSpeechIn(BaseModel): + class Config: + extra = Extra.allow + audio_uri: str """ Input audio. @@ -1090,6 +1290,9 @@ class TranscribeSpeechIn(BaseModel): class TranscribedWord(BaseModel): + class Config: + extra = Extra.allow + word: str """ Text of word. @@ -1109,6 +1312,9 @@ class TranscribedWord(BaseModel): class TranscribedSegment(BaseModel): + class Config: + extra = Extra.allow + text: str """ Text of segment. @@ -1132,6 +1338,9 @@ class TranscribedSegment(BaseModel): class ChapterMarker(BaseModel): + class Config: + extra = Extra.allow + title: str """ Chapter title. @@ -1143,6 +1352,9 @@ class ChapterMarker(BaseModel): class TranscribeSpeechOut(BaseModel): + class Config: + extra = Extra.allow + text: str """ Transcribed text. @@ -1158,6 +1370,9 @@ class TranscribeSpeechOut(BaseModel): class GenerateSpeechIn(BaseModel): + class Config: + extra = Extra.allow + text: str """ Input text. @@ -1169,6 +1384,9 @@ class GenerateSpeechIn(BaseModel): class GenerateSpeechOut(BaseModel): + class Config: + extra = Extra.allow + audio_uri: str """ Base 64-encoded WAV audio bytes, or a hosted audio url if `store` is provided. @@ -1176,6 +1394,9 @@ class GenerateSpeechOut(BaseModel): class XTTSV2In(BaseModel): + class Config: + extra = Extra.allow + text: str """ Input text. @@ -1195,6 +1416,9 @@ class XTTSV2In(BaseModel): class XTTSV2Out(BaseModel): + class Config: + extra = Extra.allow + audio_uri: str """ Base 64-encoded WAV audio bytes, or a hosted audio url if `store` is provided. @@ -1202,6 +1426,9 @@ class XTTSV2Out(BaseModel): class Embedding(BaseModel): + class Config: + extra = Extra.allow + vector: List[float] """ Embedding vector. @@ -1217,6 +1444,9 @@ class Embedding(BaseModel): class EmbedTextIn(BaseModel): + class Config: + extra = Extra.allow + text: str """ Text to embed. @@ -1244,6 +1474,9 @@ class EmbedTextIn(BaseModel): class EmbedTextOut(BaseModel): + class Config: + extra = Extra.allow + embedding: Embedding """ Generated embedding. @@ -1251,6 +1484,9 @@ class EmbedTextOut(BaseModel): class EmbedTextItem(BaseModel): + class Config: + extra = Extra.allow + text: str """ Text to embed. @@ -1266,6 +1502,9 @@ class EmbedTextItem(BaseModel): class MultiEmbedTextIn(BaseModel): + class Config: + extra = Extra.allow + items: List[EmbedTextItem] """ Items to embed. @@ -1285,6 +1524,9 @@ class MultiEmbedTextIn(BaseModel): class MultiEmbedTextOut(BaseModel): + class Config: + extra = Extra.allow + embeddings: List[Embedding] """ Generated embeddings. 
@@ -1292,6 +1534,9 @@ class MultiEmbedTextOut(BaseModel): class JinaV2In(BaseModel): + class Config: + extra = Extra.allow + items: List[EmbedTextItem] """ Items to embed. @@ -1307,6 +1552,9 @@ class JinaV2In(BaseModel): class JinaV2Out(BaseModel): + class Config: + extra = Extra.allow + embeddings: List[Embedding] """ Generated embeddings. @@ -1314,6 +1562,9 @@ class JinaV2Out(BaseModel): class EmbedImageIn(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Image to embed. @@ -1333,6 +1584,9 @@ class EmbedImageIn(BaseModel): class EmbedImageOut(BaseModel): + class Config: + extra = Extra.allow + embedding: Embedding """ Generated embedding. @@ -1340,6 +1594,9 @@ class EmbedImageOut(BaseModel): class EmbedImageItem(BaseModel): + class Config: + extra = Extra.allow + image_uri: str """ Image to embed. @@ -1351,6 +1608,9 @@ class EmbedImageItem(BaseModel): class EmbedTextOrImageItem(BaseModel): + class Config: + extra = Extra.allow + image_uri: Optional[str] = None """ Image to embed. @@ -1370,6 +1630,9 @@ class EmbedTextOrImageItem(BaseModel): class MultiEmbedImageIn(BaseModel): + class Config: + extra = Extra.allow + items: List[EmbedImageItem] """ Items to embed. @@ -1385,6 +1648,9 @@ class MultiEmbedImageIn(BaseModel): class MultiEmbedImageOut(BaseModel): + class Config: + extra = Extra.allow + embeddings: List[Embedding] """ Generated embeddings. @@ -1392,6 +1658,9 @@ class MultiEmbedImageOut(BaseModel): class CLIPIn(BaseModel): + class Config: + extra = Extra.allow + items: List[EmbedTextOrImageItem] """ Items to embed. @@ -1407,6 +1676,9 @@ class CLIPIn(BaseModel): class CLIPOut(BaseModel): + class Config: + extra = Extra.allow + embeddings: List[Embedding] """ Generated embeddings. @@ -1414,6 +1686,9 @@ class CLIPOut(BaseModel): class FindOrCreateVectorStoreIn(BaseModel): + class Config: + extra = Extra.allow + collection_name: Annotated[str, Field(max_length=63, min_length=1)] """ Vector store name. @@ -1425,6 +1700,9 @@ class FindOrCreateVectorStoreIn(BaseModel): class FindOrCreateVectorStoreOut(BaseModel): + class Config: + extra = Extra.allow + collection_name: Annotated[str, Field(max_length=63, min_length=1)] """ Vector store name. @@ -1438,8 +1716,14 @@ class FindOrCreateVectorStoreOut(BaseModel): class ListVectorStoresIn(BaseModel): pass + class Config: + extra = Extra.allow + class ListVectorStoresOut(BaseModel): + class Config: + extra = Extra.allow + items: Optional[List[FindOrCreateVectorStoreOut]] = None """ List of vector stores. @@ -1447,6 +1731,9 @@ class ListVectorStoresOut(BaseModel): class DeleteVectorStoreIn(BaseModel): + class Config: + extra = Extra.allow + collection_name: str """ Vector store name. @@ -1458,6 +1745,9 @@ class DeleteVectorStoreIn(BaseModel): class DeleteVectorStoreOut(BaseModel): + class Config: + extra = Extra.allow + collection_name: str """ Vector store name. @@ -1473,6 +1763,9 @@ class Vector(BaseModel): Canonical representation of document with embedding vector. """ + class Config: + extra = Extra.allow + id: str """ Document ID. @@ -1488,6 +1781,9 @@ class Vector(BaseModel): class FetchVectorsIn(BaseModel): + class Config: + extra = Extra.allow + collection_name: str """ Vector store name. @@ -1503,6 +1799,9 @@ class FetchVectorsIn(BaseModel): class FetchVectorsOut(BaseModel): + class Config: + extra = Extra.allow + vectors: List[Vector] """ Retrieved vectors. 
@@ -1510,6 +1809,9 @@ class FetchVectorsOut(BaseModel): class UpdateVectorsOut(BaseModel): + class Config: + extra = Extra.allow + count: int """ Number of vectors modified. @@ -1517,6 +1819,9 @@ class UpdateVectorsOut(BaseModel): class DeleteVectorsOut(BaseModel): + class Config: + extra = Extra.allow + count: int """ Number of vectors modified. @@ -1524,6 +1829,9 @@ class DeleteVectorsOut(BaseModel): class UpdateVectorParams(BaseModel): + class Config: + extra = Extra.allow + id: str """ Document ID. @@ -1539,6 +1847,9 @@ class UpdateVectorParams(BaseModel): class UpdateVectorsIn(BaseModel): + class Config: + extra = Extra.allow + collection_name: str """ Vector store name. @@ -1554,6 +1865,9 @@ class UpdateVectorsIn(BaseModel): class DeleteVectorsIn(BaseModel): + class Config: + extra = Extra.allow + collection_name: str """ Vector store name. @@ -1569,6 +1883,9 @@ class DeleteVectorsIn(BaseModel): class QueryVectorStoreIn(BaseModel): + class Config: + extra = Extra.allow + collection_name: str """ Vector store to query against. @@ -1616,6 +1933,9 @@ class QueryVectorStoreIn(BaseModel): class VectorStoreQueryResult(BaseModel): + class Config: + extra = Extra.allow + id: str """ Document ID. @@ -1635,6 +1955,9 @@ class VectorStoreQueryResult(BaseModel): class QueryVectorStoreOut(BaseModel): + class Config: + extra = Extra.allow + results: List[List[VectorStoreQueryResult]] """ Query results. diff --git a/substrate/future_dataclass_models.py b/substrate/future_dataclass_models.py index ecaeb38..5f2791e 100644 --- a/substrate/future_dataclass_models.py +++ b/substrate/future_dataclass_models.py @@ -1,7 +1,6 @@ """ -𐃏 Substrate -@generated file -(using datamodel-codegen) +֍ Substrate +generated file """ diff --git a/substrate/nodes.py b/substrate/nodes.py index 5d86915..c352e33 100644 --- a/substrate/nodes.py +++ b/substrate/nodes.py @@ -95,7 +95,14 @@ class Experimental(CoreNode[ExperimentalOut]): """https://substrate.run/nodes#Experimental""" - def __init__(self, name: str, args: Dict[str, Any], timeout: int = 60, hide: bool = False): + def __init__( + self, + name: str, + args: Dict[str, Any], + timeout: int = 60, + hide: bool = False, + **kwargs, + ): """ Args: name: Identifier. 
@@ -104,7 +111,14 @@ def __init__(self, name: str, args: Dict[str, Any], timeout: int = 60, hide: boo https://substrate.run/nodes#Experimental """ - super().__init__(name=name, args=args, timeout=timeout, hide=hide, out_type=ExperimentalOut) + super().__init__( + name=name, + args=args, + timeout=timeout, + hide=hide, + out_type=ExperimentalOut, + **kwargs, + ) self.node = "Experimental" @property @@ -134,6 +148,7 @@ def __init__( "Firellava13B", ] = "Llama3Instruct8B", hide: bool = False, + **kwargs, ): """ Args: @@ -153,6 +168,7 @@ def __init__( model=model, hide=hide, out_type=GenerateTextOut, + **kwargs, ) self.node = "GenerateText" @@ -177,6 +193,7 @@ def __init__( max_tokens: Optional[int] = None, model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", hide: bool = False, + **kwargs, ): """ Args: @@ -196,6 +213,7 @@ def __init__( model=model, hide=hide, out_type=GenerateJSONOut, + **kwargs, ) self.node = "GenerateJSON" @@ -225,6 +243,7 @@ def __init__( "Llama3Instruct70B", ] = "Llama3Instruct8B", hide: bool = False, + **kwargs, ): """ Args: @@ -244,6 +263,7 @@ def __init__( model=model, hide=hide, out_type=MultiGenerateTextOut, + **kwargs, ) self.node = "MultiGenerateText" @@ -266,6 +286,7 @@ def __init__( temperature: float = 0.4, max_tokens: Optional[int] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -281,6 +302,7 @@ def __init__( max_tokens=max_tokens, hide=hide, out_type=BatchGenerateTextOut, + **kwargs, ) self.node = "BatchGenerateText" @@ -306,6 +328,7 @@ def __init__( max_tokens: Optional[int] = None, model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", hide: bool = False, + **kwargs, ): """ Args: @@ -327,6 +350,7 @@ def __init__( model=model, hide=hide, out_type=MultiGenerateJSONOut, + **kwargs, ) self.node = "MultiGenerateJSON" @@ -351,6 +375,7 @@ def __init__( temperature: float = 0.4, max_tokens: Optional[int] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -370,6 +395,7 @@ def __init__( max_tokens=max_tokens, hide=hide, out_type=BatchGenerateJSONOut, + **kwargs, ) self.node = "BatchGenerateJSON" @@ -399,6 +425,7 @@ def __init__( top_p: float = 0.95, max_tokens: Optional[int] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -428,6 +455,7 @@ def __init__( max_tokens=max_tokens, hide=hide, out_type=Mistral7BInstructOut, + **kwargs, ) self.node = "Mistral7BInstruct" @@ -457,6 +485,7 @@ def __init__( top_p: float = 0.95, max_tokens: Optional[int] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -486,6 +515,7 @@ def __init__( max_tokens=max_tokens, hide=hide, out_type=Mixtral8x7BInstructOut, + **kwargs, ) self.node = "Mixtral8x7BInstruct" @@ -515,6 +545,7 @@ def __init__( max_tokens: Optional[int] = None, json_schema: Optional[Dict[str, Any]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -544,6 +575,7 @@ def __init__( json_schema=json_schema, hide=hide, out_type=Llama3Instruct8BOut, + **kwargs, ) self.node = "Llama3Instruct8B" @@ -572,6 +604,7 @@ def __init__( top_p: float = 0.95, max_tokens: Optional[int] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -599,6 +632,7 @@ def __init__( max_tokens=max_tokens, hide=hide, out_type=Llama3Instruct70BOut, + **kwargs, ) self.node = "Llama3Instruct70B" @@ -621,6 +655,7 @@ def __init__( image_uris: List[str], max_tokens: Optional[int] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -636,6 +671,7 @@ def __init__( max_tokens=max_tokens, hide=hide, out_type=Firellava13BOut, + **kwargs, ) 
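The two-line change above, accept `**kwargs` and forward it through `super().__init__`, repeats for every node class in this file. A sketch of what it permits, assuming `CoreNode` simply folds unrecognized keywords into the request (the diff shows only the forwarding, not how they are consumed); `beta_option` is a made-up keyword:

    # Before this patch an unknown keyword raised TypeError in the
    # constructor; now it passes through **kwargs to CoreNode untouched.
    node = GenerateText(
        prompt="tell me a story",
        max_tokens=64,
        beta_option=True,  # hypothetical extra parameter, forwarded as-is
    )

Paired with `extra = Extra.allow` on the models, the generated client is now permissive in both directions: new request parameters can be sent, and new response fields survive validation, without regenerating the SDK.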
self.node = "Firellava13B" @@ -652,7 +688,7 @@ def future(self) -> FutureFirellava13BOut: # type: ignore class GenerateImage(CoreNode[GenerateImageOut]): """https://substrate.run/nodes#GenerateImage""" - def __init__(self, prompt: str, store: Optional[str] = None, hide: bool = False): + def __init__(self, prompt: str, store: Optional[str] = None, hide: bool = False, **kwargs): """ Args: prompt: Text prompt. @@ -660,7 +696,7 @@ def __init__(self, prompt: str, store: Optional[str] = None, hide: bool = False) https://substrate.run/nodes#GenerateImage """ - super().__init__(prompt=prompt, store=store, hide=hide, out_type=GenerateImageOut) + super().__init__(prompt=prompt, store=store, hide=hide, out_type=GenerateImageOut, **kwargs) self.node = "GenerateImage" @property @@ -682,6 +718,7 @@ def __init__( num_images: int, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -697,6 +734,7 @@ def __init__( store=store, hide=hide, out_type=MultiGenerateImageOut, + **kwargs, ) self.node = "MultiGenerateImage" @@ -723,6 +761,7 @@ def __init__( width: int = 1024, seeds: Optional[List[int]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -746,6 +785,7 @@ def __init__( seeds=seeds, hide=hide, out_type=StableDiffusionXLLightningOut, + **kwargs, ) self.node = "StableDiffusionXLLightning" @@ -775,6 +815,7 @@ def __init__( strength: float = 0.5, seeds: Optional[List[int]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -804,6 +845,7 @@ def __init__( seeds=seeds, hide=hide, out_type=StableDiffusionXLControlNetOut, + **kwargs, ) self.node = "StableDiffusionXLControlNet" @@ -827,6 +869,7 @@ def __init__( mask_image_uri: Optional[str] = None, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -844,6 +887,7 @@ def __init__( store=store, hide=hide, out_type=InpaintImageOut, + **kwargs, ) self.node = "InpaintImage" @@ -868,6 +912,7 @@ def __init__( mask_image_uri: Optional[str] = None, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -887,6 +932,7 @@ def __init__( store=store, hide=hide, out_type=MultiInpaintImageOut, + **kwargs, ) self.node = "MultiInpaintImage" @@ -915,6 +961,7 @@ def __init__( strength: float = 0.8, seeds: Optional[List[int]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -942,6 +989,7 @@ def __init__( seeds=seeds, hide=hide, out_type=StableDiffusionXLInpaintOut, + **kwargs, ) self.node = "StableDiffusionXLInpaint" @@ -964,6 +1012,7 @@ def __init__( mask_image_uri: str, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -979,6 +1028,7 @@ def __init__( store=store, hide=hide, out_type=EraseImageOut, + **kwargs, ) self.node = "EraseImage" @@ -1002,6 +1052,7 @@ def __init__( background_color: Optional[str] = None, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1019,6 +1070,7 @@ def __init__( store=store, hide=hide, out_type=RemoveBackgroundOut, + **kwargs, ) self.node = "RemoveBackground" @@ -1042,6 +1094,7 @@ def __init__( output_resolution: int = 1024, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1059,6 +1112,7 @@ def __init__( store=store, hide=hide, out_type=UpscaleImageOut, + **kwargs, ) self.node = "UpscaleImage" @@ -1081,6 +1135,7 @@ def __init__( point: Point, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1096,6 +1151,7 @@ def __init__( store=store, hide=hide, out_type=SegmentUnderPointOut, + **kwargs, ) self.node = "SegmentUnderPoint" @@ -1119,6 +1175,7 @@ def 
__init__( box_prompts: Optional[List[BoundingBox]] = None, store: Optional[str] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1136,6 +1193,7 @@ def __init__( store=store, hide=hide, out_type=SegmentAnythingOut, + **kwargs, ) self.node = "SegmentAnything" @@ -1162,6 +1220,7 @@ def __init__( diarize: bool = False, suggest_chapters: bool = False, hide: bool = False, + **kwargs, ): """ Args: @@ -1185,6 +1244,7 @@ def __init__( suggest_chapters=suggest_chapters, hide=hide, out_type=TranscribeSpeechOut, + **kwargs, ) self.node = "TranscribeSpeech" @@ -1201,7 +1261,7 @@ def future(self) -> FutureTranscribeSpeechOut: # type: ignore class GenerateSpeech(CoreNode[GenerateSpeechOut]): """https://substrate.run/nodes#GenerateSpeech""" - def __init__(self, text: str, store: Optional[str] = None, hide: bool = False): + def __init__(self, text: str, store: Optional[str] = None, hide: bool = False, **kwargs): """ Args: text: Input text. @@ -1209,7 +1269,7 @@ def __init__(self, text: str, store: Optional[str] = None, hide: bool = False): https://substrate.run/nodes#GenerateSpeech """ - super().__init__(text=text, store=store, hide=hide, out_type=GenerateSpeechOut) + super().__init__(text=text, store=store, hide=hide, out_type=GenerateSpeechOut, **kwargs) self.node = "GenerateSpeech" @property @@ -1234,6 +1294,7 @@ def __init__( doc_id: Optional[str] = None, model: Literal["jina-v2", "clip"] = "jina-v2", hide: bool = False, + **kwargs, ): """ Args: @@ -1255,6 +1316,7 @@ def __init__( model=model, hide=hide, out_type=EmbedTextOut, + **kwargs, ) self.node = "EmbedText" @@ -1278,6 +1340,7 @@ def __init__( embedded_metadata_keys: Optional[List[str]] = None, model: Literal["jina-v2", "clip"] = "jina-v2", hide: bool = False, + **kwargs, ): """ Args: @@ -1295,6 +1358,7 @@ def __init__( model=model, hide=hide, out_type=MultiEmbedTextOut, + **kwargs, ) self.node = "MultiEmbedText" @@ -1317,6 +1381,7 @@ def __init__( collection_name: Optional[str] = None, embedded_metadata_keys: Optional[List[str]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1332,6 +1397,7 @@ def __init__( embedded_metadata_keys=embedded_metadata_keys, hide=hide, out_type=JinaV2Out, + **kwargs, ) self.node = "JinaV2" @@ -1355,6 +1421,7 @@ def __init__( doc_id: Optional[str] = None, model: Literal["clip"] = "clip", hide: bool = False, + **kwargs, ): """ Args: @@ -1372,6 +1439,7 @@ def __init__( model=model, hide=hide, out_type=EmbedImageOut, + **kwargs, ) self.node = "EmbedImage" @@ -1394,6 +1462,7 @@ def __init__( collection_name: Optional[str] = None, model: Literal["clip"] = "clip", hide: bool = False, + **kwargs, ): """ Args: @@ -1409,6 +1478,7 @@ def __init__( model=model, hide=hide, out_type=MultiEmbedImageOut, + **kwargs, ) self.node = "MultiEmbedImage" @@ -1431,6 +1501,7 @@ def __init__( collection_name: Optional[str] = None, embedded_metadata_keys: Optional[List[str]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1446,6 +1517,7 @@ def __init__( embedded_metadata_keys=embedded_metadata_keys, hide=hide, out_type=CLIPOut, + **kwargs, ) self.node = "CLIP" @@ -1467,6 +1539,7 @@ def __init__( collection_name: str, model: Literal["jina-v2", "clip"], hide: bool = False, + **kwargs, ): """ Args: @@ -1480,6 +1553,7 @@ def __init__( model=model, hide=hide, out_type=FindOrCreateVectorStoreOut, + **kwargs, ) self.node = "FindOrCreateVectorStore" @@ -1496,13 +1570,13 @@ def future(self) -> FutureFindOrCreateVectorStoreOut: # type: ignore class ListVectorStores(CoreNode[ListVectorStoresOut]): 
"""https://substrate.run/nodes#ListVectorStores""" - def __init__(self, hide: bool = False): + def __init__(self, hide: bool = False, **kwargs): """ Args: https://substrate.run/nodes#ListVectorStores """ - super().__init__(hide=hide, out_type=ListVectorStoresOut) + super().__init__(hide=hide, out_type=ListVectorStoresOut, **kwargs) self.node = "ListVectorStores" @property @@ -1523,6 +1597,7 @@ def __init__( collection_name: str, model: Literal["jina-v2", "clip"], hide: bool = False, + **kwargs, ): """ Args: @@ -1536,6 +1611,7 @@ def __init__( model=model, hide=hide, out_type=DeleteVectorStoreOut, + **kwargs, ) self.node = "DeleteVectorStore" @@ -1558,6 +1634,7 @@ def __init__( model: Literal["jina-v2", "clip"], ids: List[str], hide: bool = False, + **kwargs, ): """ Args: @@ -1573,6 +1650,7 @@ def __init__( ids=ids, hide=hide, out_type=FetchVectorsOut, + **kwargs, ) self.node = "FetchVectors" @@ -1595,6 +1673,7 @@ def __init__( model: Literal["jina-v2", "clip"], vectors: List[UpdateVectorParams], hide: bool = False, + **kwargs, ): """ Args: @@ -1610,6 +1689,7 @@ def __init__( vectors=vectors, hide=hide, out_type=UpdateVectorsOut, + **kwargs, ) self.node = "UpdateVectors" @@ -1632,6 +1712,7 @@ def __init__( model: Literal["jina-v2", "clip"], ids: List[str], hide: bool = False, + **kwargs, ): """ Args: @@ -1647,6 +1728,7 @@ def __init__( ids=ids, hide=hide, out_type=DeleteVectorsOut, + **kwargs, ) self.node = "DeleteVectors" @@ -1677,6 +1759,7 @@ def __init__( include_metadata: bool = False, filters: Optional[Dict[str, Any]] = None, hide: bool = False, + **kwargs, ): """ Args: @@ -1708,6 +1791,7 @@ def __init__( filters=filters, hide=hide, out_type=QueryVectorStoreOut, + **kwargs, ) self.node = "QueryVectorStore" diff --git a/substrate/typeddict_models.py b/substrate/typeddict_models.py index a40a9a7..d6c52f7 100644 --- a/substrate/typeddict_models.py +++ b/substrate/typeddict_models.py @@ -1,7 +1,6 @@ """ -𐃏 Substrate -@generated file -(using datamodel-codegen) +֍ Substrate +generated file """ From 359a804fda300e5818181b3f16adf622d12bab96 Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 11:36:21 -0400 Subject: [PATCH 3/7] filter pydantic warnings --- substrate/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/substrate/__init__.py b/substrate/__init__.py index bc3fe59..750856a 100644 --- a/substrate/__init__.py +++ b/substrate/__init__.py @@ -4,6 +4,13 @@ 20240612.20240612 """ +import warnings + +import pydantic + +if pydantic.VERSION.startswith("2"): + warnings.filterwarnings("ignore", category=pydantic.warnings.PydanticDeprecatedSince20) + from .nodes import ( CLIP, JinaV2, From 2dd0e37e00c416df9777295e5c328b2ef6221887 Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 11:44:46 -0400 Subject: [PATCH 4/7] filter deprecation warningsfilter dep warnings before importing pydantic models --- substrate/__init__.py | 7 ---- substrate/nodes.py | 86 ++++++++++++++++++++++--------------------- 2 files changed, 45 insertions(+), 48 deletions(-) diff --git a/substrate/__init__.py b/substrate/__init__.py index 750856a..bc3fe59 100644 --- a/substrate/__init__.py +++ b/substrate/__init__.py @@ -4,13 +4,6 @@ 20240612.20240612 """ -import warnings - -import pydantic - -if pydantic.VERSION.startswith("2"): - warnings.filterwarnings("ignore", category=pydantic.warnings.PydanticDeprecatedSince20) - from .nodes import ( CLIP, JinaV2, diff --git a/substrate/nodes.py b/substrate/nodes.py index c352e33..4966499 100644 --- a/substrate/nodes.py +++ b/substrate/nodes.py @@ 
-4,50 +4,54 @@ """ from __future__ import annotations +import warnings from typing import Any, Dict, List, Optional from typing_extensions import Literal -from .core.models import ( - CLIPOut, - JinaV2Out, - EmbedTextOut, - EmbedImageOut, - EraseImageOut, - ExperimentalOut, - FetchVectorsOut, - Firellava13BOut, - GenerateJSONOut, - GenerateTextOut, - InpaintImageOut, - UpscaleImageOut, - DeleteVectorsOut, - GenerateImageOut, - UpdateVectorsOut, - GenerateSpeechOut, - MultiEmbedTextOut, - MultiEmbedImageOut, - SegmentAnythingOut, - ListVectorStoresOut, - Llama3Instruct8BOut, - QueryVectorStoreOut, - RemoveBackgroundOut, - TranscribeSpeechOut, - BatchGenerateJSONOut, - BatchGenerateTextOut, - DeleteVectorStoreOut, - Llama3Instruct70BOut, - Mistral7BInstructOut, - MultiGenerateJSONOut, - MultiGenerateTextOut, - MultiInpaintImageOut, - SegmentUnderPointOut, - MultiGenerateImageOut, - Mixtral8x7BInstructOut, - FindOrCreateVectorStoreOut, - StableDiffusionXLInpaintOut, - StableDiffusionXLLightningOut, - StableDiffusionXLControlNetOut, -) +# filter pydantic v2 deprecation warnings +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) + from .core.models import ( + CLIPOut, + JinaV2Out, + EmbedTextOut, + EmbedImageOut, + EraseImageOut, + ExperimentalOut, + FetchVectorsOut, + Firellava13BOut, + GenerateJSONOut, + GenerateTextOut, + InpaintImageOut, + UpscaleImageOut, + DeleteVectorsOut, + GenerateImageOut, + UpdateVectorsOut, + GenerateSpeechOut, + MultiEmbedTextOut, + MultiEmbedImageOut, + SegmentAnythingOut, + ListVectorStoresOut, + Llama3Instruct8BOut, + QueryVectorStoreOut, + RemoveBackgroundOut, + TranscribeSpeechOut, + BatchGenerateJSONOut, + BatchGenerateTextOut, + DeleteVectorStoreOut, + Llama3Instruct70BOut, + Mistral7BInstructOut, + MultiGenerateJSONOut, + MultiGenerateTextOut, + MultiInpaintImageOut, + SegmentUnderPointOut, + MultiGenerateImageOut, + Mixtral8x7BInstructOut, + FindOrCreateVectorStoreOut, + StableDiffusionXLInpaintOut, + StableDiffusionXLLightningOut, + StableDiffusionXLControlNetOut, + ) from .core.corenode import CoreNode from .future_dataclass_models import ( FutureCLIPOut, From b6ca7a6dbe2dc18600907660de59448493ee2552 Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 11:50:58 -0400 Subject: [PATCH 5/7] fix codegen --- substrate/nodes.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/substrate/nodes.py b/substrate/nodes.py index 4966499..96a054b 100644 --- a/substrate/nodes.py +++ b/substrate/nodes.py @@ -5,8 +5,8 @@ from __future__ import annotations import warnings -from typing import Any, Dict, List, Optional -from typing_extensions import Literal + +from .core.corenode import CoreNode # filter pydantic v2 deprecation warnings with warnings.catch_warnings(): @@ -52,7 +52,9 @@ StableDiffusionXLLightningOut, StableDiffusionXLControlNetOut, ) -from .core.corenode import CoreNode +from typing import Any, Dict, List, Optional +from typing_extensions import Literal + from .future_dataclass_models import ( FutureCLIPOut, FutureJinaV2Out, From 30e3e3f6c1bb3085d47795065dd938dcd05d9c8d Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 12:38:30 -0400 Subject: [PATCH 6/7] api update --- substrate/GEN_VERSION | 2 +- substrate/__init__.py | 2 +- substrate/core/models.py | 12 ++++++++---- substrate/future_dataclass_models.py | 15 ++++++++++----- substrate/nodes.py | 9 ++++++--- substrate/typeddict_models.py | 12 ++++++++---- 6 files changed, 34 insertions(+), 18 deletions(-) diff --git 
a/substrate/GEN_VERSION b/substrate/GEN_VERSION index 3caa8ba..9cb385b 100644 --- a/substrate/GEN_VERSION +++ b/substrate/GEN_VERSION @@ -1 +1 @@ -20240612.20240612 \ No newline at end of file +20240612.20240613 \ No newline at end of file diff --git a/substrate/__init__.py b/substrate/__init__.py index bc3fe59..4a4b8d4 100644 --- a/substrate/__init__.py +++ b/substrate/__init__.py @@ -1,7 +1,7 @@ """ 𐃏 Substrate Python SDK -20240612.20240612 +20240612.20240613 """ from .nodes import ( diff --git a/substrate/core/models.py b/substrate/core/models.py index a1fee99..21ea013 100644 --- a/substrate/core/models.py +++ b/substrate/core/models.py @@ -237,6 +237,10 @@ class Config: """ Maximum number of tokens to generate. """ + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + """ + Selected model. + """ class BatchGenerateTextOut(BaseModel): @@ -293,10 +297,6 @@ class BatchGenerateJSONIn(BaseModel): class Config: extra = Extra.allow - model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" - """ - Selected model. - """ prompts: List[str] """ Batch input prompts. @@ -313,6 +313,10 @@ class Config: """ Maximum number of tokens to generate. """ + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + """ + Selected model. + """ class BatchGenerateJSONOut(BaseModel): diff --git a/substrate/future_dataclass_models.py b/substrate/future_dataclass_models.py index 5f2791e..dfad7d1 100644 --- a/substrate/future_dataclass_models.py +++ b/substrate/future_dataclass_models.py @@ -296,6 +296,11 @@ class FutureBatchGenerateTextIn: (Future reference) Maximum number of tokens to generate. """ + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + """ + (Future reference) + Selected model. + """ @dataclass @@ -378,11 +383,6 @@ class FutureBatchGenerateJSONIn: (Future reference) JSON schema to guide `json_object` response. """ - model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" - """ - (Future reference) - Selected model. - """ temperature: float = 0.4 """ (Future reference) @@ -393,6 +393,11 @@ class FutureBatchGenerateJSONIn: (Future reference) Maximum number of tokens to generate. """ + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B" + """ + (Future reference) + Selected model. + """ @dataclass diff --git a/substrate/nodes.py b/substrate/nodes.py index 96a054b..d109638 100644 --- a/substrate/nodes.py +++ b/substrate/nodes.py @@ -291,6 +291,7 @@ def __init__( prompts: List[str], temperature: float = 0.4, max_tokens: Optional[int] = None, + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", hide: bool = False, **kwargs, ): @@ -299,6 +300,7 @@ def __init__( prompts: Batch input prompts. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. + model: Selected model. 
https://substrate.run/nodes#BatchGenerateText """ @@ -306,6 +308,7 @@ def __init__( prompts=prompts, temperature=temperature, max_tokens=max_tokens, + model=model, hide=hide, out_type=BatchGenerateTextOut, **kwargs, @@ -377,9 +380,9 @@ def __init__( self, prompts: List[str], json_schema: Dict[str, Any], - model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", temperature: float = 0.4, max_tokens: Optional[int] = None, + model: Literal["Mistral7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B", hide: bool = False, **kwargs, ): @@ -387,18 +390,18 @@ def __init__( Args: prompts: Batch input prompts. json_schema: JSON schema to guide `json_object` response. - model: Selected model. temperature: Sampling temperature to use. Higher values make the output more random, lower values make the output more deterministic. max_tokens: Maximum number of tokens to generate. + model: Selected model. https://substrate.run/nodes#BatchGenerateJSON """ super().__init__( prompts=prompts, json_schema=json_schema, - model=model, temperature=temperature, max_tokens=max_tokens, + model=model, hide=hide, out_type=BatchGenerateJSONOut, **kwargs, diff --git a/substrate/typeddict_models.py b/substrate/typeddict_models.py index d6c52f7..b0ca3fc 100644 --- a/substrate/typeddict_models.py +++ b/substrate/typeddict_models.py @@ -203,6 +203,10 @@ class BatchGenerateTextIn(TypedDict): """ Maximum number of tokens to generate. """ + model: NotRequired[Literal["Mistral7BInstruct", "Llama3Instruct8B"]] + """ + Selected model. + """ class BatchGenerateTextOut(TypedDict): @@ -247,10 +251,6 @@ class MultiGenerateJSONOut(TypedDict): class BatchGenerateJSONIn(TypedDict): - model: NotRequired[Literal["Mistral7BInstruct", "Llama3Instruct8B"]] - """ - Selected model. - """ prompts: NotRequired[List[str]] """ Batch input prompts. @@ -267,6 +267,10 @@ class BatchGenerateJSONIn(TypedDict): """ Maximum number of tokens to generate. """ + model: NotRequired[Literal["Mistral7BInstruct", "Llama3Instruct8B"]] + """ + Selected model. + """ class BatchGenerateJSONOut(TypedDict): From c16dcb785d9359a89284814fe3744f5bb73aba05 Mon Sep 17 00:00:00 2001 From: benzguo Date: Thu, 13 Jun 2024 13:19:43 -0400 Subject: [PATCH 7/7] run format after sync --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2f44920..9c123ed 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ poetry.lock: pyproject.toml ensure: poetry.lock poetry install -sync: sync-version sync-codegen +sync: sync-version sync-codegen format sync-version: poetry run python scripts/sync_version.py
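One practical consequence of the patch 6 reordering: `BatchGenerateJSON` previously took `model` as its third parameter, so a caller passing it positionally would now hand a model name to `temperature`. A usage sketch of both batch nodes as they stand after this series, keywords throughout; the prompts and schema are illustrative placeholders:

    batch_text = BatchGenerateText(
        prompts=["summarize document A", "summarize document B"],
        model="Mistral7BInstruct",  # new on this node; defaults to "Llama3Instruct8B"
    )
    batch_json = BatchGenerateJSON(
        prompts=["extract the title from document A"],
        json_schema={"type": "object", "properties": {"title": {"type": "string"}}},
        model="Llama3Instruct8B",  # now the last parameter, so pass it by keyword
    )
    response = substrate.run(batch_text, batch_json)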