Skip to content

Commit

Permalink
generate code codegen
Browse files Browse the repository at this point in the history
  • Loading branch information
csreesan committed Aug 14, 2024
1 parent ee441f5 commit b92884b
Show file tree
Hide file tree
Showing 6 changed files with 264 additions and 87 deletions.
2 changes: 1 addition & 1 deletion substrate/GEN_VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
20240617.20240806
20240617.20240814
120 changes: 38 additions & 82 deletions substrate/__init__.py
Original file line number Diff line number Diff line change
@@ -1,103 +1,59 @@
"""
𐃏 Substrate Python SDK
20240617.20240806
20240617.20240814
"""

from .run_python import RunPython
from .nodes import (

Experimental,

Box,

CLIP,
If,

ComputeText,

MultiComputeText,

BatchComputeText,

BatchComputeJSON,

Box,
JinaV2,
EmbedText,
EmbedImage,
EraseImage,
ComputeJSON,

MultiComputeJSON,

Mistral7BInstruct,

Mixtral8x7BInstruct,

Llama3Instruct8B,

Llama3Instruct70B,

ComputeText,
Experimental,
FetchVectors,
Firellava13B,

GenerateImage,

MultiGenerateImage,

InpaintImage,

MultiInpaintImage,

StableDiffusionXLLightning,

StableDiffusionXLInpaint,

StableDiffusionXLControlNet,

StableVideoDiffusion,

InterpolateFrames,

TranscribeSpeech,

GenerateSpeech,

RemoveBackground,

EraseImage,

UpscaleImage,

SegmentUnderPoint,

SegmentAnything,

DeleteVectors,
GenerateImage,
SplitDocument,

EmbedText,

UpdateVectors,
GenerateSpeech,
MultiEmbedText,

EmbedImage,

MultiEmbedImage,

JinaV2,

CLIP,

FindOrCreateVectorStore,

SegmentAnything,
BatchComputeJSON,
BatchComputeText,
ListVectorStores,

DeleteVectorStore,

Llama3Instruct8B,
MultiComputeJSON,
MultiComputeText,
QueryVectorStore,

FetchVectors,

UpdateVectors,

DeleteVectors,
)
RemoveBackground,
TranscribeSpeech,
DeleteVectorStore,
InterpolateFrames,
Llama3Instruct70B,
Mistral7BInstruct,
MultiInpaintImage,
SegmentUnderPoint,
MultiGenerateImage,
Mixtral8x7BInstruct,
StableVideoDiffusion,
FindOrCreateVectorStore,
StableDiffusionXLInpaint,
StableDiffusionXLLightning,
StableDiffusionXLControlNet,
)
from .core.sb import sb
from ._version import __version__
from .substrate import Secrets, Substrate, SubstrateResponse
from .run_python import RunPython

__all__ = [
"__version__",
Expand Down Expand Up @@ -150,4 +106,4 @@
"FetchVectors",
"UpdateVectors",
"DeleteVectors",
]
]
53 changes: 52 additions & 1 deletion substrate/core/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,13 @@ class Config:
"""
Maximum number of tokens to generate.
"""
model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B"
model: Literal[
"Mistral7BInstruct",
"Mixtral8x7BInstruct",
"Llama3Instruct8B",
"Llama3Instruct70B",
"gpt-4o",
] = "Llama3Instruct8B"
"""
Selected model.
"""
Expand All @@ -236,6 +242,51 @@ class Config:
"""


class GenerateCodeIn(BaseModel):
    """Request payload for the GenerateCode node: a prompt plus the target language."""

    class Config:
        # Extra.allow: fields not declared on the model are kept rather than
        # rejected (pydantic v1 config), so new API fields don't break old clients.
        extra = Extra.allow

    prompt: str
    """
    Input prompt.
    """
    language: Literal[
        "python",
        "java",
        "c++",
        "javascript",
        "typescript",
        "php",
        "html",
        "c#",
        "sql",
        "ruby",
        "tex",
        "shell",
    ]
    """
    Language of the code.
    """
    temperature: Annotated[Optional[float], Field(ge=0.0, le=1.0)] = None
    """
    Higher values make the output more random, lower values make the output more deterministic.
    """
    max_tokens: Optional[int] = None
    """
    Maximum number of tokens to generate.
    """


class GenerateCodeOut(BaseModel):
    """Response payload for the GenerateCode node."""

    class Config:
        # Extra.allow: unrecognized response fields are retained, not rejected.
        extra = Extra.allow

    code: str
    """
    Code response.
    """


class MultiComputeTextIn(BaseModel):
class Config:
extra = Extra.allow
Expand Down
62 changes: 61 additions & 1 deletion substrate/future_dataclass_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,13 @@ class FutureComputeJSONIn:
(Future reference)
Maximum number of tokens to generate.
"""
model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B"
model: Literal[
"Mistral7BInstruct",
"Mixtral8x7BInstruct",
"Llama3Instruct8B",
"Llama3Instruct70B",
"gpt-4o",
] = "Llama3Instruct8B"
"""
(Future reference)
Selected model.
Expand All @@ -295,6 +301,60 @@ class FutureComputeJSONOut:
"""


@dataclass
class FutureGenerateCodeIn:
    """
    Future reference to FutureGenerateCodeIn
    """

    prompt: str
    """
    (Future reference)
    Input prompt.
    """
    language: Literal[
        "python",
        "java",
        "c++",
        "javascript",
        "typescript",
        "php",
        "html",
        "c#",
        "sql",
        "ruby",
        "tex",
        "shell",
    ]
    """
    (Future reference)
    Language of the code.
    """
    # NOTE(review): unlike the pydantic GenerateCodeIn model, no ge/le bounds are
    # enforced on temperature here — presumably validated elsewhere; confirm.
    temperature: Optional[float] = None
    """
    (Future reference)
    Higher values make the output more random, lower values make the output more deterministic.
    """
    max_tokens: Optional[int] = None
    """
    (Future reference)
    Maximum number of tokens to generate.
    """


@dataclass
class FutureGenerateCodeOut:
    """
    Future reference to FutureGenerateCodeOut
    (output of the GenerateCode node).
    """

    code: str
    """
    (Future reference)
    Code response.
    """


@dataclass
class FutureMultiComputeTextIn:
"""
Expand Down
63 changes: 62 additions & 1 deletion substrate/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,13 @@ def __init__(
json_schema: Dict[str, Any],
temperature: float = 0.4,
max_tokens: Optional[int] = None,
model: Literal["Mistral7BInstruct", "Mixtral8x7BInstruct", "Llama3Instruct8B"] = "Llama3Instruct8B",
model: Literal[
"Mistral7BInstruct",
"Mixtral8x7BInstruct",
"Llama3Instruct8B",
"Llama3Instruct70B",
"gpt-4o",
] = "Llama3Instruct8B",
hide: bool = False,
**kwargs,
):
Expand Down Expand Up @@ -313,6 +319,61 @@ def future(self) -> FutureComputeJSONOut: # type: ignore
return super().future # type: ignore


class GenerateCode(CoreNode[GenerateCodeOut]):
    """https://substrate.run/nodes#GenerateCode"""

    def __init__(
        self,
        prompt: str,
        language: Literal[
            "python",
            "java",
            "c++",
            "javascript",
            "typescript",
            "php",
            "html",
            "c#",
            "sql",
            "ruby",
            "tex",
            "shell",
        ],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        hide: bool = False,
        **kwargs,
    ):
        """
        Construct a GenerateCode node.

        Args:
            prompt: Input prompt.
            language: Language of the code.
            temperature: Higher values make the output more random, lower values make the output more deterministic.
            max_tokens: Maximum number of tokens to generate.
        https://substrate.run/nodes#GenerateCode
        """
        # Gather the node's declared inputs, then forward everything to CoreNode.
        node_inputs = {
            "prompt": prompt,
            "language": language,
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        super().__init__(
            **node_inputs,
            hide=hide,
            out_type=GenerateCodeOut,
            **kwargs,
        )
        self.node = "GenerateCode"

    @property
    def future(self) -> FutureGenerateCodeOut:  # type: ignore
        """
        Future reference to this node's output.
        https://substrate.run/nodes#GenerateCode
        """
        return super().future  # type: ignore


class MultiComputeText(CoreNode[MultiComputeTextOut]):
"""https://substrate.run/nodes#MultiComputeText"""

Expand Down
Loading

0 comments on commit b92884b

Please sign in to comment.