Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

SVD SDK #45

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion substrate/GEN_VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
20240617.20240724
20240617.20240726
4 changes: 3 additions & 1 deletion substrate/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""
𐃏 Substrate Python SDK

20240617.20240724
20240617.20240726
"""

from .nodes import (
Expand Down Expand Up @@ -43,6 +43,7 @@
SegmentUnderPoint,
MultiGenerateImage,
Mixtral8x7BInstruct,
StableVideoDiffusion,
FindOrCreateVectorStore,
StableDiffusionXLInpaint,
StableDiffusionXLLightning,
Expand Down Expand Up @@ -80,6 +81,7 @@
"StableDiffusionXLLightning",
"StableDiffusionXLInpaint",
"StableDiffusionXLControlNet",
"StableVideoDiffusion",
"TranscribeSpeech",
"GenerateSpeech",
"RemoveBackground",
Expand Down
44 changes: 44 additions & 0 deletions substrate/core/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -955,6 +955,50 @@ class Config:
"""


class StableVideoDiffusionIn(BaseModel):
    """
    Request payload for the StableVideoDiffusion node (image-to-video generation).
    """

    class Config:
        # Accept and preserve request fields beyond those declared below.
        extra = Extra.allow

    image_uri: str
    """
    Original image.
    """
    store: Optional[str] = None
    """
    Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string.
    """
    output_format: Literal["gif", "mp4"] = "gif"
    """
    Output video format.
    """
    seed: Optional[int] = None
    """
    Seed for deterministic generation. Default is a random seed.
    """
    fps: int = 7
    """
    Frames per second of the generated video.
    """
    motion_bucket_id: int = 180
    """
    The motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
    """
    noise: float = 0.1
    """
    The amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.
    """


class StableVideoDiffusionOut(BaseModel):
    """
    Response payload for the StableVideoDiffusion node.
    """

    class Config:
        # Accept and preserve response fields beyond those declared below.
        extra = Extra.allow

    video_uri: str
    """
    Generated video.
    """


class InpaintImageIn(BaseModel):
class Config:
extra = Extra.allow
Expand Down
10 changes: 5 additions & 5 deletions substrate/core/sb.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,15 @@

from .client.future import Future
from .future_directive import (
FString,
JQDirective,
JQTargetType,
JinjaTemplate,
JinjaDirective,
ConcatDirective,
FormatDirective,
JQDirectiveTarget,
ConcatDirectiveItem,
FormatDirective,
FString,
)
from .client.find_futures_client import find_futures_client

Expand Down Expand Up @@ -88,12 +88,12 @@ def jinja(cls, template_str: Union[Future, str], variables: Dict[str, Any]) -> s
def format(cls, f_string: Union[Future, str], variables: Dict[str, Any]) -> str:
future_id, val = (f_string.id, None) if isinstance(f_string, Future) else (None, f_string)
directive = FormatDirective(
f_string=FString(future_id=future_id, val=val), variables=variables,
f_string=FString(future_id=future_id, val=val),
variables=variables,
)
result = Future(directive=directive)
if isinstance(f_string, Future):
result.FutureG.add_edge(f_string, result)
for dep in find_futures_client(variables):
result.FutureG.add_edge(dep, result)
return result # type: ignore

return result # type: ignore
59 changes: 57 additions & 2 deletions substrate/future_dataclass_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,6 @@ class FutureComputeTextIn:
"Mixtral8x7BInstruct",
"Llama3Instruct8B",
"Llama3Instruct70B",
"Llama3Instruct405B",
"Firellava13B",
"gpt-4o",
"gpt-4o-mini",
Expand Down Expand Up @@ -1160,7 +1159,7 @@ class FutureStableDiffusionXLControlNetIn:
(Future reference)
Text prompt.
"""
num_images: int = 1
num_images: int
"""
(Future reference)
Number of images to generate.
Expand Down Expand Up @@ -1210,6 +1209,62 @@ class FutureStableDiffusionXLControlNetOut:
"""


@dataclass
class FutureStableVideoDiffusionIn:
    """
    Future reference to FutureStableVideoDiffusionIn.

    Mirrors the StableVideoDiffusion input fields; declaration order fixes the
    dataclass-generated __init__ signature, so it must not be rearranged.
    """

    image_uri: str
    """
    (Future reference)
    Original image.
    """
    store: Optional[str] = None
    """
    (Future reference)
    Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string.
    """
    output_format: Literal["gif", "mp4"] = "gif"
    """
    (Future reference)
    Output video format.
    """
    seed: Optional[int] = None
    """
    (Future reference)
    Seed for deterministic generation. Default is a random seed.
    """
    fps: int = 7
    """
    (Future reference)
    Frames per second of the generated video.
    """
    motion_bucket_id: int = 180
    """
    (Future reference)
    The motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
    """
    noise: float = 0.1
    """
    (Future reference)
    The amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.
    """


@dataclass
class FutureStableVideoDiffusionOut:
    """
    Future reference to FutureStableVideoDiffusionOut.

    Mirrors the StableVideoDiffusion output fields.
    """

    video_uri: str
    """
    (Future reference)
    Generated video.
    """


@dataclass
class FutureInpaintImageIn:
"""
Expand Down
56 changes: 54 additions & 2 deletions substrate/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@
SegmentUnderPointOut,
MultiGenerateImageOut,
Mixtral8x7BInstructOut,
StableVideoDiffusionOut,
FindOrCreateVectorStoreOut,
StableDiffusionXLInpaintOut,
StableDiffusionXLLightningOut,
Expand Down Expand Up @@ -101,6 +102,7 @@
FutureSegmentUnderPointOut,
FutureMultiGenerateImageOut,
FutureMixtral8x7BInstructOut,
FutureStableVideoDiffusionOut,
FutureFindOrCreateVectorStoreOut,
FutureStableDiffusionXLInpaintOut,
FutureStableDiffusionXLLightningOut,
Expand Down Expand Up @@ -223,7 +225,6 @@ def __init__(
"Mixtral8x7BInstruct",
"Llama3Instruct8B",
"Llama3Instruct70B",
"Llama3Instruct405B",
"Firellava13B",
"gpt-4o",
"gpt-4o-mini",
Expand Down Expand Up @@ -892,7 +893,7 @@ def __init__(
image_uri: str,
control_method: Literal["edge", "depth", "illusion", "tile"],
prompt: str,
num_images: int = 1,
num_images: int,
output_resolution: int = 1024,
negative_prompt: Optional[str] = None,
store: Optional[str] = None,
Expand Down Expand Up @@ -944,6 +945,57 @@ def future(self) -> FutureStableDiffusionXLControlNetOut: # type: ignore
return super().future # type: ignore


class StableVideoDiffusion(CoreNode[StableVideoDiffusionOut]):
    """https://substrate.run/nodes#StableVideoDiffusion"""

    def __init__(
        self,
        image_uri: str,
        store: Optional[str] = None,
        output_format: Literal["gif", "mp4"] = "gif",
        seed: Optional[int] = None,
        fps: int = 7,
        motion_bucket_id: int = 180,
        noise: float = 0.1,
        hide: bool = False,
        **kwargs,
    ):
        """
        Args:
            image_uri: Original image.
            store: Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string.
            output_format: Output video format.
            seed: Seed for deterministic generation. Default is a random seed.
            fps: Frames per second of the generated video.
            motion_bucket_id: The motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
            noise: The amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.

        https://substrate.run/nodes#StableVideoDiffusion
        """
        # Collect the node arguments once, then forward them together with any
        # caller-supplied extras. A key appearing in both raises TypeError,
        # matching the behavior of passing explicit keywords alongside **kwargs.
        node_args = dict(
            image_uri=image_uri,
            store=store,
            output_format=output_format,
            seed=seed,
            fps=fps,
            motion_bucket_id=motion_bucket_id,
            noise=noise,
            hide=hide,
            out_type=StableVideoDiffusionOut,
        )
        super().__init__(**node_args, **kwargs)
        self.node = "StableVideoDiffusion"

    @property
    def future(self) -> FutureStableVideoDiffusionOut:  # type: ignore
        """
        Future reference to this node's output.

        https://substrate.run/nodes#StableVideoDiffusion
        """
        return super().future  # type: ignore


class InpaintImage(CoreNode[InpaintImageOut]):
"""https://substrate.run/nodes#InpaintImage"""

Expand Down
39 changes: 38 additions & 1 deletion substrate/typeddict_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,6 @@ class ComputeTextIn(TypedDict):
"Mixtral8x7BInstruct",
"Llama3Instruct8B",
"Llama3Instruct70B",
"Llama3Instruct405B",
"Firellava13B",
"gpt-4o",
"gpt-4o-mini",
Expand Down Expand Up @@ -813,6 +812,44 @@ class StableDiffusionXLControlNetOut(TypedDict):
"""


class StableVideoDiffusionIn(TypedDict):
    """
    TypedDict form of the StableVideoDiffusion input. Every key is marked
    NotRequired, so callers may supply any subset of the fields.
    """

    image_uri: NotRequired[str]
    """
    Original image.
    """
    store: NotRequired[str]
    """
    Use "hosted" to return a video URL hosted on Substrate. You can also provide a URL to a registered [file store](https://docs.substrate.run/reference/external-files). If unset, the video data will be returned as a base64-encoded string.
    """
    output_format: NotRequired[Literal["gif", "mp4"]]
    """
    Output video format.
    """
    seed: NotRequired[int]
    """
    Seed for deterministic generation. Default is a random seed.
    """
    fps: NotRequired[int]
    """
    Frames per second of the generated video.
    """
    motion_bucket_id: NotRequired[int]
    """
    The motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
    """
    noise: NotRequired[float]
    """
    The amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.
    """


class StableVideoDiffusionOut(TypedDict):
    """
    TypedDict form of the StableVideoDiffusion output.
    """

    video_uri: NotRequired[str]
    """
    Generated video.
    """


class InpaintImageIn(TypedDict):
image_uri: NotRequired[str]
"""
Expand Down
Loading