diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2f2b3be4..ffd276ca 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,10 @@ Change log 0.13.0 (unreleased) ------------------- +**Other changes** + +- Split ``Var`` internally into ``Var`` and ``_VarInfo`` to help the garbage collector collect propagated values. + **Support change** - Support for ``Python 3.8`` has been dropped. diff --git a/src/spox/_adapt.py b/src/spox/_adapt.py index 32017dab..8877e40e 100644 --- a/src/spox/_adapt.py +++ b/src/spox/_adapt.py @@ -4,7 +4,6 @@ import warnings from typing import Optional -import numpy as np import onnx import onnx.version_converter @@ -14,8 +13,7 @@ from ._node import Node from ._schemas import SCHEMAS from ._scope import Scope -from ._utils import from_array -from ._var import Var +from ._var import _VarInfo def adapt_node( @@ -23,7 +21,7 @@ def adapt_node( proto: onnx.NodeProto, source_version: int, target_version: int, - var_names: dict[Var, str], + var_names: dict[_VarInfo, str], ) -> Optional[list[onnx.NodeProto]]: if source_version == target_version: return None @@ -32,21 +30,16 @@ def adapt_node( # By using a dictionary we ensure that we only have a single # ValueInfo per (possibly repeated) input name. input_info = { - var_names[var]: var.unwrap_type()._to_onnx_value_info( - var_names[var], _traceback_name=f"adapt-input {key}" + var_names[var_info]: var_info.unwrap_type()._to_onnx_value_info( + var_names[var_info], _traceback_name=f"adapt-input {key}" ) - for key, var in node.inputs.get_vars().items() + for key, var_info in node.inputs.get_var_infos().items() } output_info = [ - var.unwrap_type()._to_onnx_value_info( - var_names[var], _traceback_name=f"adapt-output {key}" + var_info.unwrap_type()._to_onnx_value_info( + var_names[var_info], _traceback_name=f"adapt-output {key}" ) - for key, var in node.outputs.get_vars().items() - ] - initializers = [ - from_array(var._value, name) # type: ignore - for name, var in node.inputs.get_vars().items() - if isinstance(var._value, np.ndarray) + for key, var_info in node.outputs.get_var_infos().items() ] except ValueError: return None @@ -57,7 +50,6 @@ def adapt_node( "spox__singleton_adapter_graph", list(input_info.values()), output_info, - initializers, ), opset_imports=[onnx.helper.make_operatorsetid("", source_version)], ) @@ -71,7 +63,7 @@ def adapt_inline( node: _Inline, protos: list[onnx.NodeProto], target_opsets: dict[str, int], - var_names: dict[Var, str], + var_names: dict[_VarInfo, str], node_name: str, ) -> list[onnx.NodeProto]: source_version = max({v for d, v in node.opset_req if d in ("", "ai.onnx")}) @@ -99,7 +91,7 @@ def adapt_best_effort( node: Node, protos: list[onnx.NodeProto], opsets: dict[str, int], - var_names: dict[Var, str], + var_names: dict[_VarInfo, str], node_names: dict[Node, str], ) -> Optional[list[onnx.NodeProto]]: if isinstance(node, _Inline): diff --git a/src/spox/_build.py b/src/spox/_build.py index 60c544a0..63556185 100644 --- a/src/spox/_build.py +++ b/src/spox/_build.py @@ -21,7 +21,7 @@ from ._node import Node from ._scope import Scope from ._traverse import iterative_dfs -from ._var import Var +from ._var import Var, _VarInfo, unwrap_vars if TYPE_CHECKING: from ._graph import Graph @@ -58,11 +58,11 @@ class BuildResult: scope: Scope nodes: dict[Node, tuple[onnx.NodeProto, ...]] - arguments: tuple[Var, ...] - results: tuple[Var, ...] + arguments: tuple[_VarInfo, ...] + results: tuple[_VarInfo, ...] opset_req: set[tuple[str, int]] functions: tuple["_function.Function", ...]
- initializers: dict[Var, np.ndarray] + initializers: dict[_VarInfo, np.ndarray] class Builder: @@ -93,7 +93,7 @@ class ScopeTree: """ Structure representing the tree of scopes, which are identified with the respective graphs. - This structure is the base of the least-enclosing-scope algorithm. Every value (Var), and hence + This structure is the base of the least-enclosing-scope algorithm. Every value (VarInfo), and hence the responsible Node - up to its (Python object) identity may appear in multiple scopes, but it should best-cased be computed only once in the ONNX graph, same as in the Python source code. @@ -164,12 +164,12 @@ def lca(self, a: "Graph", b: "Graph") -> "Graph": graphs: set["Graph"] graph_topo: list["Graph"] # Arguments, results - arguments_of: dict["Graph", list[Var]] - results_of: dict["Graph", list[Var]] + arguments_of: dict["Graph", list[_VarInfo]] + results_of: dict["Graph", list[_VarInfo]] source_of: dict["Graph", Node] # Arguments found by traversal - all_arguments_in: dict["Graph", set[Var]] - claimed_arguments_in: dict["Graph", set[Var]] + all_arguments_in: dict["Graph", set[_VarInfo]] + claimed_arguments_in: dict["Graph", set[_VarInfo]] # Scopes scope_tree: ScopeTree scope_own: dict["Graph", list[Node]] @@ -218,7 +218,7 @@ def get_intro_results( var._rename(key) return vars - def discover(self, graph: "Graph") -> tuple[set[Var], set[Var]]: + def discover(self, graph: "Graph") -> tuple[set[_VarInfo], set[_VarInfo]]: """ Run the discovery step of the build process. Resolves arguments and results for the involved graphs. Finds the topological ordering between (sub)graphs and sets their owners (nodes of which they are attributes). @@ -244,8 +244,8 @@ def discover(self, graph: "Graph") -> tuple[set[Var], set[Var]]: # Create and set the source & results of this graph if not graph.requested_results: raise BuildError(f"Graph {graph} has no results.") - self.results_of[graph] = self.get_intro_results( - graph.requested_results, graph is self.main + self.results_of[graph] = unwrap_vars( + self.get_intro_results(graph.requested_results, graph is self.main) ) self.source_of[graph] = self.results_of[graph][0]._op @@ -289,8 +289,8 @@ def collect_arguments(nd: Node): self.arguments_of[graph] = list(all_arguments - claimed_arguments) else: # If there is a request, we may not have found it by traversal if an argument was unused. - all_arguments |= set(graph.requested_arguments) - self.arguments_of[graph] = list(graph.requested_arguments) + all_arguments |= set(unwrap_vars(graph.requested_arguments)) + self.arguments_of[graph] = unwrap_vars(graph.requested_arguments) if set(self.arguments_of[graph]) & claimed_arguments: raise BuildError( @@ -432,7 +432,7 @@ def compile_graph( # A bunch of model metadata we're collecting opset_req: set[tuple[str, int]] = set() functions: list[_function.Function] = [] - initializers: dict[Var, np.ndarray] = {} + initializers: dict[_VarInfo, np.ndarray] = {} # Add arguments to our scope for arg in self.arguments_of[graph]: diff --git a/src/spox/_debug.py b/src/spox/_debug.py index 81141928..a9b7c0aa 100644 --- a/src/spox/_debug.py +++ b/src/spox/_debug.py @@ -4,7 +4,7 @@ import sys from contextlib import contextmanager -from spox._var import Var +from spox._var import _VarInfo # If `STORE_TRACEBACK` is `True` any node created will store a traceback for its point of creation. 
STORE_TRACEBACK = False @@ -36,7 +36,7 @@ def show_construction_tracebacks(debug_index): if -1 in found: del found[-1] for name, obj in reversed(found.values()): - if isinstance(obj, Var): + if isinstance(obj, _VarInfo): if not obj: continue node = obj._op diff --git a/src/spox/_fields.py b/src/spox/_fields.py index d02ca742..f2912955 100644 --- a/src/spox/_fields.py +++ b/src/spox/_fields.py @@ -3,12 +3,15 @@ import dataclasses import enum +import warnings from collections.abc import Iterable, Iterator, Sequence from dataclasses import dataclass -from typing import Any, Optional, Union +from typing import Any, Optional, Union, cast from ._attributes import Attr -from ._var import Var +from ._exceptions import InferenceWarning +from ._value_prop import PropDict, PropValue +from ._var import Var, _VarInfo @dataclass @@ -31,20 +34,65 @@ class VarFieldKind(enum.Enum): VARIADIC = 2 +class BaseVars: + def __init__(self, vars): + self.vars = vars + + def _unpack_to_any(self): + """Unpack the stored fields into a tuple of appropriate length, typed as Any.""" + return tuple(self.vars.values()) + + def _flatten(self): + """Iterate over the pairs of names and values of fields in this object.""" + for key, value in self.vars.items(): + if value is None or isinstance(value, Var): + yield key, value + else: + yield from ((f"{key}_{i}", v) for i, v in enumerate(value)) + + def flatten_vars(self): + """Return a flat mapping by name of all the VarInfos in this object.""" + return {key: var for key, var in self._flatten() if var is not None} + + def __getattr__(self, attr: str) -> Union["Var", Sequence["Var"]]: + """Retrieves the attribute if present in the stored variables.""" + try: + return self.vars[attr] + except KeyError: + raise AttributeError( + f"{self.__class__.__name__!r} object has no attribute {attr!r}" + ) + + def __setattr__(self, attr: str, value: Union["Var", Sequence["Var"]]) -> None: + """Sets the attribute to a value if the attribute is present in the stored variables.""" + if attr == "vars": + super().__setattr__(attr, value) + else: + self.vars[attr] = value + + def __getitem__(self, key: str): + """Allows dictionary-like access to retrieve variables.""" + return self.vars[key] + + def __setitem__(self, key: str, value) -> None: + """Allows dictionary-like access to set variables.""" + self.vars[key] = value + + @dataclass -class BaseVars(BaseFields): +class BaseVarInfos(BaseFields): def __post_init__(self): # Check if passed fields are of the appropriate types based on field kinds for field in dataclasses.fields(self): value = getattr(self, field.name) field_type = self._get_field_type(field) if field_type == VarFieldKind.SINGLE: - if not isinstance(value, Var): - raise TypeError(f"Field expected Var, got: {type(value)}.") + if not isinstance(value, _VarInfo): + raise TypeError(f"Field expected VarInfo, got: {type(value)}.") elif field_type == VarFieldKind.OPTIONAL: - if value is not None and not isinstance(value, Var): + if value is not None and not isinstance(value, _VarInfo): raise TypeError( - f"Optional must be Var or None, got: {type(value)}." + f"Optional must be VarInfo or None, got: {type(value)}." 
) elif field_type == VarFieldKind.VARIADIC: if not isinstance(value, Iterable): @@ -53,31 +101,31 @@ def __post_init__(self): ) # Cast to tuple to avoid accidental mutation setattr(self, field.name, tuple(value)) - if bad := {type(var) for var in value} - {Var}: + if bad := {type(var) for var in value} - {_VarInfo}: raise TypeError( - f"Variadic field must only consist of Vars, got: {bad}." + f"Variadic field must only consist of VarInfos, got: {bad}." ) @classmethod def _get_field_type(cls, field) -> VarFieldKind: """Access the kind of the field (single, optional, variadic) based on its type annotation.""" - if field.type == Var: + if field.type == _VarInfo: return VarFieldKind.SINGLE - elif field.type == Optional[Var]: + elif field.type == Optional[_VarInfo]: return VarFieldKind.OPTIONAL - elif field.type == Sequence[Var]: + elif field.type == Sequence[_VarInfo]: return VarFieldKind.VARIADIC raise ValueError(f"Bad field type: '{field.type}'.") - def _flatten(self) -> Iterable[tuple[str, Optional[Var]]]: + def _flatten(self) -> Iterable[tuple[str, Optional[_VarInfo]]]: """Iterate over the pairs of names and values of fields in this object.""" for key, value in self.__dict__.items(): - if value is None or isinstance(value, Var): + if value is None or isinstance(value, _VarInfo): yield key, value else: yield from ((f"{key}_{i}", v) for i, v in enumerate(value)) - def __iter__(self) -> Iterator[Optional[Var]]: + def __iter__(self) -> Iterator[Optional[_VarInfo]]: """Iterate over the values of fields in this object.""" yield from (v for _, v in self._flatten()) @@ -85,11 +133,11 @@ def __len__(self) -> int: """Count the number of fields in this object (should be same as declared in the class).""" return sum(1 for _ in self) - def get_vars(self) -> dict[str, Var]: - """Return a flat mapping by name of all the Vars in this object.""" + def get_var_infos(self) -> dict[str, _VarInfo]: + """Return a flat mapping by name of all the VarInfos in this object.""" return {key: var for key, var in self._flatten() if var is not None} - def get_fields(self) -> dict[str, Union[None, Var, Sequence[Var]]]: + def get_fields(self) -> dict[str, Union[None, _VarInfo, Sequence[_VarInfo]]]: """Return a mapping of all fields stored in this object by name.""" return self.__dict__.copy() @@ -102,15 +150,79 @@ def fully_typed(self) -> bool: """Check if all stored variables have a concrete type.""" return all( var.type is not None and var.type._is_concrete - for var in self.get_vars().values() + for var in self.get_var_infos().values() ) @dataclass -class BaseInputs(BaseVars): - pass +class BaseInputs(BaseVarInfos): + def vars(self, prop_values: Optional[PropDict] = None) -> BaseVars: + if prop_values is None: + prop_values = {} + + vars_dict: dict[str, Union[Var, Sequence[Var]]] = {} + + for field in dataclasses.fields(self): + field_type = self._get_field_type(field) + field_value = getattr(self, field.name) + + if field_type == VarFieldKind.SINGLE: + prop_value = cast(PropValue, prop_values.get(field.name, None)) + vars_dict[field.name] = Var(field_value, prop_value) + + elif ( + field_type == VarFieldKind.OPTIONAL + and prop_values.get(field.name, None) is not None + ): + prop_value = cast(PropValue, prop_values.get(field.name, None)) + vars_dict[field.name] = Var(field_value, prop_value) + + elif field_type == VarFieldKind.VARIADIC: + vars = [] + + for i, var_info in enumerate(field_value): + var_value = prop_values.get(f"{field.name}_{i}", None) + assert isinstance(var_value, PropValue) + vars.append(Var(var_info, 
var_value)) + + vars_dict[field.name] = vars + + return BaseVars(vars_dict) @dataclass -class BaseOutputs(BaseVars): - pass +class BaseOutputs(BaseVarInfos): + def _propagate_vars(self, prop_values: Optional[PropDict] = None) -> BaseVars: + if prop_values is None: + prop_values = {} + + def _create_var(key, var_info): + ret = Var(var_info, None) + + if var_info.type is None or key not in prop_values: + return ret + + prop = PropValue(var_info.type, prop_values.get(key)) + if prop.check(): + ret._value = prop + else: + warnings.warn( + InferenceWarning( + f"Propagated value {prop} does not type-check, dropping. " + f"Hint: this indicates a bug with the current value prop backend or type inference." + ) + ) + + return ret + + ret_dict = {} + + for key, var_info in self.__dict__.items(): + if var_info is None or isinstance(var_info, _VarInfo): + ret_dict[key] = _create_var(key, var_info) + else: + ret_dict[key] = [ + _create_var(f"{key}_{i}", v) for i, v in enumerate(var_info) + ] + + return BaseVars(ret_dict) diff --git a/src/spox/_function.py b/src/spox/_function.py index 79db4b9b..8aa47917 100644 --- a/src/spox/_function.py +++ b/src/spox/_function.py @@ -10,18 +10,18 @@ import onnx from . import _attributes -from ._fields import BaseAttributes, BaseInputs, BaseOutputs +from ._fields import BaseAttributes, BaseInputs, BaseOutputs, BaseVars from ._internal_op import _InternalNode from ._node import Node, OpType from ._type_system import Type -from ._var import Var +from ._var import Var, _VarInfo, unwrap_vars if TYPE_CHECKING: from . import _graph DEFAULT_FUNCTION_DOMAIN = "spox.default" -ConstructorT = TypeVar("ConstructorT", bound=Callable[..., Iterable[Var]]) +ConstructorT = TypeVar("ConstructorT", bound=Callable[..., Iterable[_VarInfo]]) class Function(_InternalNode): @@ -42,13 +42,13 @@ class Function(_InternalNode): via the ``to_onnx_function`` method. """ - func_args: dict[str, Var] + func_args: dict[str, _VarInfo] func_attrs: dict[str, _attributes.Attr] func_inputs: BaseInputs func_outputs: BaseOutputs func_graph: "_graph.Graph" - def constructor(self, attrs, inputs): + def constructor(self, attrs: dict[str, _attributes.Attr], inputs: BaseVars): """ Abstract method for functions. @@ -61,13 +61,15 @@ def constructor(self, attrs, inputs): f"Function {type(self).__name__} does not implement a constructor." ) - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values) -> dict[str, Type]: from . 
import _graph - self.func_args = _graph.arguments_dict( - **{name: var.type for name, var in self.inputs.get_vars().items()} + func_args_var = _graph.arguments_dict( + **{name: var.type for name, var in self.inputs.get_var_infos().items()} ) + self.func_args = unwrap_vars(func_args_var) + self.func_attrs = {} for name, attr in self.attrs.get_fields().items(): if attr is None: @@ -77,14 +79,16 @@ def infer_output_types(self) -> dict[str, Type]: self.func_attrs[name] = attr self.func_inputs = self.Inputs(**self.func_args) # type: ignore - self.func_outputs = self.constructor(self.func_attrs, self.func_inputs) - self.func_graph = _graph.results(**self.func_outputs.get_vars()).with_arguments( - *self.func_args.values() + self.func_outputs = self.constructor( + self.func_attrs, self.func_inputs.vars(input_prop_values) ) + self.func_graph = _graph.results( + **self.func_outputs._propagate_vars(input_prop_values).flatten_vars() + ).with_arguments(*func_args_var.values()) return { name: var.type - for name, var in self.func_outputs.get_vars().items() + for name, var in self.func_outputs.get_var_infos().items() if var.type } @@ -125,11 +129,13 @@ def to_onnx_function( def _make_function_cls(fun, num_inputs, num_outputs, domain, version, name): _FuncInputs = make_dataclass( - "_FuncInputs", ((f"in{i}", Var) for i in range(num_inputs)), bases=(BaseInputs,) + "_FuncInputs", + ((f"in{i}", _VarInfo) for i in range(num_inputs)), + bases=(BaseInputs,), ) _FuncOutputs = make_dataclass( "_FuncOutputs", - ((f"out{i}", Var) for i in range(num_outputs)), + ((f"out{i}", _VarInfo) for i in range(num_outputs)), bases=(BaseOutputs,), ) @@ -142,8 +148,8 @@ class Attributes(BaseAttributes): Outputs = _FuncOutputs op_type = OpType(name, domain, version) - def constructor(self, attrs, inputs): - return self.Outputs(*fun(*inputs.get_fields().values())) + def constructor(self, attrs: dict[str, _attributes.Attr], inputs: BaseVars): + return self.Outputs(*unwrap_vars(fun(*inputs.flatten_vars().values()))) return _Func @@ -188,9 +194,12 @@ def init(*args: Var): def alt_fun(*args: Var) -> Iterable[Var]: cls = init(*args) - return ( - cls(cls.Attributes(), cls.Inputs(*args)).outputs.get_fields().values() - ) + return [ + Var(var_info) + for var_info in cls(cls.Attributes(), cls.Inputs(*unwrap_vars(args))) + .outputs.get_fields() + .values() + ] return alt_fun # type: ignore diff --git a/src/spox/_future.py b/src/spox/_future.py index ecaa5c7b..76be31f3 100644 --- a/src/spox/_future.py +++ b/src/spox/_future.py @@ -112,7 +112,9 @@ def _promote( ) # TODO: Handle more constant-target inconsistencies here? 
- def _promote_target(obj: Union[Var, np.generic, int, float]) -> Optional[Var]: + def _promote_target( + obj: Union[Var, np.generic, int, float], + ) -> Optional[Var]: if self.constant_promotion and isinstance(obj, (np.generic, int, float)): return self.op.const(np.array(obj, dtype=target_type)) elif isinstance(obj, Var): diff --git a/src/spox/_graph.py b/src/spox/_graph.py index 33369fd7..76bae2a9 100644 --- a/src/spox/_graph.py +++ b/src/spox/_graph.py @@ -22,7 +22,7 @@ from ._schemas import max_opset_policy from ._type_system import Tensor, Type from ._utils import from_array -from ._var import Var +from ._var import Var, _VarInfo def arguments_dict(**kwargs: Optional[Union[Type, np.ndarray]]) -> dict[str, Var]: @@ -43,24 +43,32 @@ def arguments_dict(**kwargs: Optional[Union[Type, np.ndarray]]) -> dict[str, Var for name, info in kwargs.items(): attr_name = AttrString(value=name, name="dummy") if isinstance(info, Type): - result[name] = Argument( - Argument.Attributes( - name=attr_name, - type=AttrType(value=info, name="dummy"), - default=None, - ), - BaseInputs(), - ).outputs.arg + result[name] = ( + Argument( + Argument.Attributes( + name=attr_name, + type=AttrType(value=info, name="dummy"), + default=None, + ), + BaseInputs(), + ) + .get_output_vars() + .arg + ) elif isinstance(info, np.ndarray): ty = Tensor(info.dtype, info.shape) - result[name] = Argument( - Argument.Attributes( - name=attr_name, - type=AttrType(value=ty, name="dummy"), - default=AttrTensor(value=info, name="dummy"), - ), - BaseInputs(), - ).outputs.arg + result[name] = ( + Argument( + Argument.Attributes( + name=attr_name, + type=AttrType(value=ty, name="dummy"), + default=AttrTensor(value=info, name="dummy"), + ), + BaseInputs(), + ) + .get_output_vars() + .arg + ) else: raise TypeError(f"Cannot construct argument from {type(info)}.") return result @@ -110,10 +118,14 @@ def initializer(arr: np.ndarray) -> Var: ------- Var which is always equal to the respective value provided by `arr`. """ - return _Initializer( - _Initializer.Attributes(value=AttrTensor(value=arr, name="dummy")), - BaseInputs(), - ).outputs.arg + return ( + _Initializer( + _Initializer.Attributes(value=AttrTensor(value=arr, name="dummy")), + BaseInputs(), + ) + .get_output_vars() + .arg + ) @dataclass(frozen=True, eq=False) @@ -222,7 +234,7 @@ def requested_results(self) -> dict[str, Var]: """Results (named) requested by this Graph (for building).""" return self._results - def get_arguments(self) -> dict[str, Var]: + def get_arguments(self) -> dict[str, _VarInfo]: """ Get the effective named arguments (after build) of this Graph. @@ -233,7 +245,7 @@ def get_arguments(self) -> dict[str, Var]: for var in self._get_build_result().arguments } - def get_results(self) -> dict[str, Var]: + def get_results(self) -> dict[str, _VarInfo]: """ Get the effective named results (after build) of this Graph. @@ -496,4 +508,5 @@ def subgraph(types: Iterable[Type], fun: Callable[..., Iterable[Var]]) -> Graph: outs = fun(*ins) if not (isinstance(outs, Iterable) and all(isinstance(out, Var) for out in outs)): raise TypeError("Subgraph result must be an Iterable of Var.") + return enum_results(*outs).with_arguments(*ins)._with_constructor(fun) diff --git a/src/spox/_inline.py b/src/spox/_inline.py index f3077f7d..6fa01d71 100644 --- a/src/spox/_inline.py +++ b/src/spox/_inline.py @@ -14,7 +14,7 @@ from spox._node import OpType from spox._scope import Scope from spox._type_system import Type -from spox._var import Var +from spox._var import _VarInfo from . 
import _value_prop @@ -86,11 +86,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Inline", "spox.internal", 0) @@ -111,7 +111,7 @@ def opset_req(self) -> set[tuple[str, int]]: ("", INTERNAL_MIN_OPSET) } - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values) -> dict[str, Type]: # First, type check that we match the ModelProto type requirements for i, var in zip(self.graph.input, self.inputs.inputs): if var.type is not None and not ( @@ -127,16 +127,18 @@ def infer_output_types(self) -> dict[str, Type]: for k, o in enumerate(self.graph.output) } - def propagate_values(self) -> dict[str, _value_prop.PropValueType]: + def propagate_values( + self, input_prop_values + ) -> dict[str, _value_prop.PropValueType]: if any( - var.type is None or var._value is None - for var in self.inputs.get_vars().values() + var_info.type is None or input_prop_values.get(var_info.name) is None + for var_info in self.model.graph.input ): return {} wrap_feed, run, unwrap_feed = _value_prop.get_backend_calls() input_feed = { - i.name: wrap_feed(var._value) - for i, var in zip(self.model.graph.input, self.inputs.inputs) + i.name: wrap_feed(input_prop_values.get(i.name)) + for i in self.model.graph.input } output_feed = run(self.model, input_feed) return { diff --git a/src/spox/_internal_op.py b/src/spox/_internal_op.py index f51f2579..84fbc2b1 100644 --- a/src/spox/_internal_op.py +++ b/src/spox/_internal_op.py @@ -19,8 +19,8 @@ from ._scope import Scope from ._shape import SimpleShape from ._type_system import Tensor, Type -from ._value_prop import PropValueType -from ._var import Var +from ._value_prop import PropDict, PropValueType +from ._var import Var, _VarInfo, unwrap_vars # This is a default used for internal operators that # require the default domain. 
The most common of these @@ -78,7 +78,7 @@ class Inputs(BaseInputs): @dataclass class Outputs(BaseOutputs): - arg: Var + arg: _VarInfo attrs: Attributes inputs: Inputs @@ -88,7 +88,7 @@ def post_init(self, **kwargs): if self.attrs.name is not None: self.outputs.arg._rename(self.attrs.name.value) - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values) -> dict[str, Type]: # Output type is based on the value of the type attribute return {"arg": self.attrs.type.value} @@ -115,18 +115,18 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - arg: Var + arg: _VarInfo attrs: Attributes inputs: BaseInputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: # Output type is based on the value of the type attribute arr = self.attrs.value.value return {"arg": Tensor(arr.dtype, arr.shape)} - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: return {"arg": self.attrs.value.value} def update_metadata(self, opset_req, initializers, functions): @@ -149,11 +149,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Introduce", "spox.internal", 0) @@ -161,7 +161,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values) -> dict[str, Type]: return { f"outputs_{i}": arr.type for i, arr in enumerate(self.inputs.inputs) @@ -213,9 +213,11 @@ def intros(*args: Var) -> Sequence[Var]: Sequence[Var] Vars of the same value as ``args``, but with a shared dependency. """ - return _Introduce( - None, _Introduce.Inputs(args), out_variadic=len(args) - ).outputs.outputs + return ( + _Introduce(None, _Introduce.Inputs(unwrap_vars(args)), out_variadic=len(args)) + .get_output_vars() + .outputs + ) def intro(*args: Var) -> Var: @@ -246,8 +248,7 @@ def unsafe_cast(x: Var, typ: Type) -> Var: Var with the type reset to whatever was given. """ y = intro(x) - y.type = typ - y._value = x._value + y._var_info.type = typ return y diff --git a/src/spox/_node.py b/src/spox/_node.py index 322a09a9..bcd01d6f 100644 --- a/src/spox/_node.py +++ b/src/spox/_node.py @@ -19,8 +19,8 @@ from ._exceptions import InferenceWarning from ._fields import BaseAttributes, BaseInputs, BaseOutputs, VarFieldKind from ._type_system import Type -from ._value_prop import PropValue, PropValueType -from ._var import Var +from ._value_prop import PropDict +from ._var import _VarInfo if typing.TYPE_CHECKING: from ._graph import Graph @@ -85,6 +85,7 @@ class Node(ABC): out_variadic: Optional[int] _traceback: Union[list[str], None] + _validate: bool def __init__( self, @@ -94,7 +95,6 @@ def __init__( *, out_variadic: Optional[int] = None, infer_types: bool = True, - propagate_values: bool = True, validate: bool = True, **kwargs, ): @@ -113,9 +113,6 @@ def __init__( infer_types Whether to run type inference - setting types for output vars if previously None. Should always succeed if possible, possibly raising type errors if inputs/attributes are not correctly typed. - propagate_values - Whether to run value propagation - setting values for output vars if previously None. 
Should only succeed - if all inputs are constant (attributes always are). validate Whether to run some extra validation. The default validation only warns against unknown types. kwargs @@ -130,17 +127,16 @@ def __init__( # As inference functions may access which output vars we initialized (e.g. variadics) # we inject uninitialized vars first self.outputs = self._init_output_vars() - self.inference(infer_types, propagate_values) + self.inference(infer_types=infer_types) else: self.outputs = outputs + # Store validate for when the values are actually propagated + self._validate = validate + # Optionally store debug information about where this node was created self._traceback = traceback.format_stack() if STORE_TRACEBACK else None - # Performs type checking using known flags (like type_members) - # and warns if type inference failed (some types are None). - if validate: - self.validate_types() self.post_init(**kwargs) @property @@ -184,12 +180,10 @@ def signature(self) -> str: """Get a signature of this Node, including its inputs and attributes (but not outputs).""" def fmt_input(key, var): - return f"{key}: {var.type}" + ( - f" = {var._value}" if var._value is not None else "" - ) + return f"{key}: {var.type}" sign = ", ".join( - fmt_input(key, var) for key, var in self.inputs.get_vars().items() + fmt_input(key, var) for key, var in self.inputs.get_var_infos().items() ) sign = f"inputs [{sign}]" shown_attrs = { @@ -212,7 +206,7 @@ def pre_init(self, **kwargs): def post_init(self, **kwargs): """Post-initialization hook. Called at the end of ``__init__`` after other default fields are set.""" - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> PropDict: """ Propagate values from inputs, and, if possible, compute values for outputs as well. This method is used to implement ONNX partial data propagation - for example so that @@ -220,38 +214,49 @@ def propagate_values(self) -> dict[str, PropValueType]: """ return {} - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: """ Inference routine for output types. Often overriden by inheriting Node types. - Returns a dictionary of output field names into Types for the respective Vars. + Returns a dictionary of output field names into Types for the respective VarInfos. """ return {} - def inference(self, infer_types: bool = True, propagate_values: bool = True): + def inference( + self, input_prop_values: Optional[PropDict] = None, infer_types: bool = True + ): + if input_prop_values is None: + input_prop_values = {} # Type inference routine - call infer_output_types if required # and check if it provides the expected outputs. 
- out_types = self.infer_output_types() if infer_types else {} + out_types = ( + self.infer_output_types(input_prop_values=input_prop_values) + if infer_types + else {} + ) - for key, var in self.outputs.get_vars().items(): - if var.type is None: # If no existing type from init_output_vars - # Attempt to use the ones from kwargs, if none then what type inference gave + for key, var in self.outputs.get_var_infos().items(): + typ = out_types.get(key) + if var.type is None or (typ is not None and typ._subtype(var.type)): + # If there is no type, or the infered type is a subtype + # we use the new type var.type = out_types.get(key) + def get_output_vars( + self, input_prop_values: Optional[PropDict] = None, infer_types: bool = True + ): + if input_prop_values is None: + input_prop_values = {} # After typing everything, try to get values for outputs - out_values = self.propagate_values() if propagate_values else {} - for key, var in self.outputs.get_vars().items(): - if var.type is not None and var._value is None and key in out_values: - prop = PropValue(var.type, out_values.get(key)) - if prop.check(): - var._value = prop - else: - warnings.warn( - InferenceWarning( - f"Propagated value {prop} does not type-check, dropping. " - f"Hint: this indicates a bug with the current value prop backend or type inference." - ) - ) + self.inference(infer_types=infer_types, input_prop_values=input_prop_values) + + # Performs type checking using known flags (like type_members) + # and warns if type inference failed (some types are None). + if self._validate: + self.validate_types() + + out_values = self.propagate_values(input_prop_values) + return self.outputs._propagate_vars(out_values) def validate_types(self) -> None: """Validation of types, ran at the end of Node creation.""" @@ -292,7 +297,7 @@ def _check_concrete_type(self, value_type: Type) -> Optional[str]: return None def _list_types(self, source): - return ((key, var.type) for key, var in source.get_vars().items()) + return ((key, var.type) for key, var in source.get_var_infos().items()) def _init_output_vars(self) -> BaseOutputs: """ @@ -309,31 +314,29 @@ def _init_output_vars(self) -> BaseOutputs: (variadic,) = variadics else: variadic = None - outputs: dict[str, Union[Var, Sequence[Var]]] = { - field.name: Var(self, None, None) + outputs: dict[str, Union[_VarInfo, Sequence[_VarInfo]]] = { + field.name: _VarInfo(self, None) for field in dataclasses.fields(self.Outputs) if field.name != variadic } if variadic is not None: assert self.out_variadic is not None - outputs[variadic] = [ - Var(self, None, None) for _ in range(self.out_variadic) - ] + outputs[variadic] = [_VarInfo(self, None) for _ in range(self.out_variadic)] return self.Outputs(**outputs) # type: ignore @property - def dependencies(self) -> Iterable[Var]: - """List of input Vars into this Node.""" - return (var for var in self.inputs.get_vars().values()) + def dependencies(self) -> Iterable[_VarInfo]: + """List of input VarInfos into this Node.""" + return (var for var in self.inputs.get_var_infos().values()) @property - def dependents(self) -> Iterable[Var]: - """List of output Vars from this Node.""" - return (var for var in self.outputs.get_vars().values()) + def dependents(self) -> Iterable[_VarInfo]: + """List of output VarInfos from this Node.""" + return (var for var in self.outputs.get_var_infos().values()) @property - def incident(self) -> Iterable[Var]: - """List of both input and output Vars for this Node.""" + def incident(self) -> Iterable[_VarInfo]: + """List of both 
input and output VarInfos for this Node.""" return itertools.chain(self.dependencies, self.dependents) @property diff --git a/src/spox/_public.py b/src/spox/_public.py index 101d8d40..cc168f14 100644 --- a/src/spox/_public.py +++ b/src/spox/_public.py @@ -17,6 +17,7 @@ from ._inline import _Inline from ._standard import _strip_dim_symbol from ._type_system import Type +from ._value_prop import PropDict from ._var import Var @@ -35,9 +36,13 @@ def argument(typ: Type) -> Var: An unnamed argument variable of given type that may be used as a model input to build a graph. """ - return _internal_op.Argument( - _internal_op.Argument.Attributes(type=AttrType(typ, "dummy"), default=None) - ).outputs.arg + return ( + _internal_op.Argument( + _internal_op.Argument.Attributes(type=AttrType(typ, "dummy"), default=None) + ) + .get_output_vars() + .arg + ) @contextlib.contextmanager @@ -49,8 +54,8 @@ def _temporary_renames(**kwargs: Var): pre: dict[Var, Optional[str]] = {} try: for name, arg in kwargs.items(): - pre[arg] = arg._name - arg._rename(name) + pre[arg] = arg._var_info._name + arg._var_info._rename(name) yield finally: for arg, name in pre.items(): @@ -118,6 +123,7 @@ def build( if not all(isinstance(var, Var) for var in outputs.values()): seen_types = {type(obj) for obj in outputs.values()} raise TypeError(f"Build outputs must be Vars, not {seen_types - {Var}}.") + if not all(isinstance(var._op, Argument) for var in inputs.values()): raise TypeError( "Build inputs must be `Var`s constructed using the `spox.argument` function. " @@ -297,11 +303,23 @@ def inline_inner(*args: Var, **kwargs: Var) -> dict[str, Var]: f"Error processing arguments, got {set(kwargs)}, expected {set(in_names)}." ) node = _Inline( - inputs=_Inline.Inputs([kwargs[name] for name in in_names]), + inputs=_Inline.Inputs([kwargs[name]._var_info for name in in_names]), out_variadic=len(model.graph.output), model=model, ) - return dict(zip(out_names, node.outputs.outputs)) + + prop_values: PropDict = { + name: kwargs[name]._value + for name in in_names + if kwargs[name]._value is not None + } + + return dict( + zip( + out_names, + node.get_output_vars(prop_values).flatten_vars().values(), + ) + ) return inline_inner diff --git a/src/spox/_scope.py b/src/spox/_scope.py index 50b1ad52..a8179158 100644 --- a/src/spox/_scope.py +++ b/src/spox/_scope.py @@ -5,7 +5,7 @@ from typing import Generic, Optional, TypeVar, Union, overload from ._node import Node -from ._var import Var +from ._var import _VarInfo H = TypeVar("H", bound=Hashable) @@ -18,7 +18,7 @@ class ScopeError(Exception): class ScopeSpace(Generic[H]): """ - Represents the namespace of a scope for some type H, like Node or Var. + Represents the namespace of a scope for some type H, like ``Node`` or ``_VarInfo``. Methods (and operators) on the namespace work both ways: both with names (str) and the named type (H). So ``__getitem__`` (``ScopeSpace[item]``) may be used for both the name of an object and the object of a name. @@ -152,15 +152,15 @@ class Scope: """ Class representing the state of an ONNX-rules scope. - Has namespaces (represented by a ScopeSpace) for Vars and Nodes. + Has namespaces (represented by a ScopeSpace) for VarInfos and Nodes. 
""" - var: ScopeSpace[Var] + var: ScopeSpace[_VarInfo] node: ScopeSpace[Node] def __init__( self, - sub_var: Optional[ScopeSpace[Var]] = None, + sub_var: Optional[ScopeSpace[_VarInfo]] = None, sub_node: Optional[ScopeSpace[Node]] = None, parent: Optional["Scope"] = None, ): @@ -180,7 +180,7 @@ def of(cls, *what): if not isinstance(key, str): key, value = value, key assert isinstance(key, str) - if isinstance(value, Var): + if isinstance(value, _VarInfo): scope.var[key] = value elif isinstance(value, Node): scope.node[key] = value @@ -202,7 +202,7 @@ def update(self, node: Node, prefix: str = "", force: bool = True): node Node to introduce in the scope. prefix - What value to prefix the node name with. If the Var has a predeclared name, it does not get the prefix. + What value to prefix the node name with. If the VarInfo has a predeclared name, it does not get the prefix. force Whether to attempt to overwrite existing names (possibly raising a ScopeError if they were different). By default, this is set to True to be more strict, so we see if the scoping algorithm failed to only @@ -210,7 +210,7 @@ def update(self, node: Node, prefix: str = "", force: bool = True): """ if force or node not in self.node: self.node[node] = self.node.enum(prefix + node.op_type.identifier) - for field, arr in node.outputs.get_vars().items(): + for field, arr in node.outputs.get_var_infos().items(): if arr._name is None: base = f"{self.node[node]}_{field}" name = self.var.maybe_enum(base) diff --git a/src/spox/_standard.py b/src/spox/_standard.py index ac519875..0dde0d34 100644 --- a/src/spox/_standard.py +++ b/src/spox/_standard.py @@ -5,7 +5,6 @@ from typing import TYPE_CHECKING, Callable -import numpy as np import onnx import onnx.reference import onnx.shape_inference @@ -19,7 +18,7 @@ from ._shape import SimpleShape from ._type_system import Optional, Sequence, Tensor, Type from ._utils import from_array -from ._value_prop import PropValueType +from ._value_prop import PropDict, PropValue, PropValueType if TYPE_CHECKING: from ._graph import Graph @@ -50,7 +49,11 @@ def min_output(self) -> int: return self.schema.min_output def to_singleton_onnx_model( - self, *, dummy_outputs: bool = True, with_dummy_subgraphs: bool = True + self, + *, + dummy_outputs: bool = True, + with_dummy_subgraphs: bool = True, + input_prop_values: PropDict = {}, ) -> tuple[onnx.ModelProto, Scope]: """ Build a singleton model consisting of just this StandardNode. Used for type inference. 
@@ -60,10 +63,10 @@ def to_singleton_onnx_model( # Prepare names for the values in scope of the node scope = Scope() scope.node[self] = "_this_" - for key, var in self.inputs.get_vars().items(): + for key, var in self.inputs.get_var_infos().items(): if var not in scope.var: scope.var[var] = key - for key, var in self.outputs.get_vars().items(): + for key, var in self.outputs.get_var_infos().items(): if var not in scope.var: scope.var[var] = key # We inject the evaluated attribute values here and then substitute back @@ -85,7 +88,7 @@ def to_singleton_onnx_model( # Input types input_info = [ var.unwrap_type()._to_onnx_value_info(key) - for key, var in self.inputs.get_vars().items() + for key, var in self.inputs.get_var_infos().items() ] # Output types with placeholder empty TypeProto (or actual type if not using dummies) @@ -95,15 +98,32 @@ def out_value_info(curr_key, curr_var): return curr_var.unwrap_type()._to_onnx_value_info(curr_key) output_info = [ - out_value_info(key, var) for key, var in self.outputs.get_vars().items() + out_value_info(key, var) + for key, var in self.outputs.get_var_infos().items() ] # Initializers, passed in to allow partial data propagation # - used so that operators like Reshape are aware of constant shapes - initializers = [ - from_array(var._value.value, key) - for key, var in self.inputs.get_vars().items() - if var._value and isinstance(var._value.value, np.ndarray) - ] + # TODO: fix this + + initializers = [] + + for name, prop in input_prop_values.items(): + if prop is None: + continue + elif not isinstance(prop, PropValue) or prop.value is None: + continue + elif isinstance(prop.type, Sequence): + initializers.extend( + [ + from_array(elem.value, f"{name}_{i}") + for i, elem in enumerate(prop.value) # type: ignore + if elem is not None + ] + ) + else: + initializers.append(from_array(prop.value, name)) # type: ignore + continue + # Graph and model graph = onnx.helper.make_graph( [node_proto], @@ -123,13 +143,13 @@ def out_value_info(curr_key, curr_var): ) return model, scope - def infer_output_types_onnx(self) -> dict[str, Type]: + def infer_output_types_onnx(self, input_prop_values: PropDict) -> dict[str, Type]: """Execute type & shape inference with ``onnx.shape_inference.infer_node_outputs``.""" # Check that all (specified) inputs have known types, as otherwise we fail - if any(var.type is None for var in self.inputs.get_vars().values()): + if any(var.type is None for var in self.inputs.get_var_infos().values()): return {} - model, _ = self.to_singleton_onnx_model() + model, _ = self.to_singleton_onnx_model(input_prop_values=input_prop_values) # Attempt to do shape inference - if an error is caught, we extend the traceback a bit try: @@ -153,26 +173,30 @@ def infer_output_types_onnx(self) -> dict[str, Type]: for key, type_ in results.items() } - def propagate_values_onnx(self) -> dict[str, PropValueType]: + def propagate_values_onnx( + self, input_prop_values: PropDict + ) -> dict[str, PropValueType]: """Perform value propagation by evaluating singleton model. The backend used for the propagation can be configured with the `spox._standard.ValuePropBackend` variable. 
""" # Cannot do propagation when some inputs were not propagated/inferred if any( - var.type is None or var._value is None - for var in self.inputs.get_vars().values() + var_info.type is None or input_prop_values.get(name, None) is None + for name, var_info in self.inputs.get_var_infos().items() ): return {} if next(iter(self.subgraphs), None) is not None: # Cannot do propagation with subgraphs implicitly for performance - should be reimplemented return {} - model, scope = self.to_singleton_onnx_model(with_dummy_subgraphs=False) + model, scope = self.to_singleton_onnx_model( + with_dummy_subgraphs=False, input_prop_values=input_prop_values + ) wrap_feed, run, unwrap_feed = _value_prop.get_backend_calls() input_feed = { - scope.var[var]: wrap_feed(var._value) - for var in self.inputs.get_vars().values() - if var._value + scope.var[var_info]: wrap_feed(input_prop_values[name]) + for name, var_info in self.inputs.get_var_infos().items() + if input_prop_values[name] } output_feed = run(model, input_feed) @@ -185,12 +209,12 @@ def propagate_values_onnx(self) -> dict[str, PropValueType]: } return {k: v for k, v in results.items() if k is not None} - def infer_output_types(self) -> dict[str, Type]: - return self.infer_output_types_onnx() + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: + return self.infer_output_types_onnx(input_prop_values) - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values) -> dict[str, PropValueType]: if _value_prop._VALUE_PROP_BACKEND != _value_prop.ValuePropBackend.NONE: - return self.propagate_values_onnx() + return self.propagate_values_onnx(input_prop_values) return {} diff --git a/src/spox/_value_prop.py b/src/spox/_value_prop.py index 300abccc..2e01f86c 100644 --- a/src/spox/_value_prop.py +++ b/src/spox/_value_prop.py @@ -4,8 +4,10 @@ import enum import logging import warnings +from collections.abc import Iterable from dataclasses import dataclass from typing import Callable, Union +from typing import Optional as tOptional import numpy as np import numpy.typing as npt @@ -24,9 +26,10 @@ - PropValue -> Optional, Some (has value) - None -> Optional, Nothing (no value) """ -PropValueType = Union[np.ndarray, list["PropValue"], "PropValue", None] -ORTValue = Union[np.ndarray, list, None] -RefValue = Union[np.ndarray, list, float, None] +PropValueType = Union[np.ndarray, Iterable[tOptional["PropValue"]], "PropValue", None] +PropDict = dict[str, Union[Iterable[tOptional["PropValue"]], "PropValue", None]] +ORTValue = Union[np.ndarray, Iterable, None] +RefValue = Union[np.ndarray, Iterable, float, None] VALUE_PROP_STRICT_CHECK: bool = False @@ -42,7 +45,7 @@ class ValuePropBackend(enum.Enum): @dataclass(frozen=True) class PropValue: - """Propagated value given to a Var, which has a run-time value known at compile-time. + """Propagated value given to a VarInfo, which has a run-time value known at compile-time. Wrapper for a few Python types which are used to represent values of ONNX types. 
diff --git a/src/spox/_var.py index 15dd2186..e5756712 100644 --- a/src/spox/_var.py +++ b/src/spox/_var.py @@ -2,7 +2,8 @@ # SPDX-License-Identifier: BSD-3-Clause import typing -from typing import Any, Callable, ClassVar, Optional, TypeVar, Union +from collections.abc import Iterable, Sequence +from typing import Any, Callable, ClassVar, Optional, TypeVar, Union, overload import numpy as np @@ -21,6 +22,93 @@ def _not_impl(self, *_): add = sub = mul = truediv = floordiv = neg = and_ = or_ = xor = not_ = _not_impl +class _VarInfo: + """ + Internal information about a ``Var``. Should be mainly inaccessible for most uses of ``spox``. + + ``VarInfo`` should be treated as strictly immutable. + If a ``VarInfo`` or any of its fields are modified, the behaviour is undefined and the produced graph may be invalid. + """ + + type: Optional[_type_system.Type] + _op: "Node" + _name: Optional[str] + + def __init__( + self, + op: "Node", + type_: Optional[_type_system.Type], + ): + """The initializer of ``VarInfo`` is protected. Use operator constructors to construct them instead.""" + if type_ is not None and not isinstance(type_, _type_system.Type): + raise TypeError("The type field of a VarInfo must be a Spox Type.") + + self.type = type_ + self._op = op + self._name = None + + def _rename(self, name: Optional[str]): + """Mutates the internal state of the VarInfo, overriding its name as given.""" + self._name = name + + @property + def _which_output(self) -> Optional[str]: + """Return the name of the output field that this var is stored in under ``self._op``.""" + if self._op is None: + return None + op_outs = self._op.outputs.get_var_infos() + candidates = [key for key, var in op_outs.items() if var is self] + return candidates[0] if candidates else None + + def __repr__(self) -> str: + nm = repr(self._name) + " " if self._name is not None else "" + op_repr = self._op.get_op_repr() if self._op else "??" + which = self._which_output + is_unary = len(self._op.outputs) <= 1 if self._op else True + which_repr = "->??" if which is None else (f"->{which}" if is_unary else "") + return f"<VarInfo {nm}from {op_repr}{which_repr}>" + + def unwrap_type(self) -> _type_system.Type: + """ + Return the :class:`~spox.Type` of ``self``, unless it is unknown. + + Returns + ------- + _type_system.Type + The type of the VarInfo. + + Raises + ------ + TypeError + If ``type is None`` (the type of this ``VarInfo`` is unknown). + """ + if self.type is None: + raise TypeError( + "Cannot unwrap requested type for VarInfo, as it is unknown." + ) + return self.type + + def unwrap_tensor(self) -> _type_system.Tensor: + """Equivalent to ``self.unwrap_type().unwrap_tensor()``.""" + return self.unwrap_type().unwrap_tensor() + + def unwrap_sequence(self) -> _type_system.Sequence: + """Equivalent to ``self.unwrap_type().unwrap_sequence()``.""" + return self.unwrap_type().unwrap_sequence() + + def unwrap_optional(self) -> _type_system.Optional: + """Equivalent to ``self.unwrap_type().unwrap_optional()``.""" + return self.unwrap_type().unwrap_optional() + + def __copy__(self) -> "_VarInfo": + # Simply return `self` to ensure that "copies" are still equal + # during the build process + return self + + def __deepcopy__(self, _) -> "_VarInfo": + raise ValueError("'VarInfo' objects cannot be deepcopied.") + + class Var: """ Abstraction for a single ONNX value - like a tensor - that can be passed around in Python code. @@ -28,6 +116,8 @@ class Var: A ``Var`` represents some output of an operator.
This operator is stored internally to allow reproducing the graph. + The ``VarInfo`` class holds all relevant information about a ``Var`` - like the ``type``. + The ``type`` field is inferred and checked by operators. It may be ``None`` if type inference failed, in which case it is unknown and should pass all type checks. However, untyped ``Var`` objects may not be used in some contexts. @@ -47,46 +137,26 @@ class Var: Should not be constructed directly - the main source of ``Var`` objects are operator constructors. """ - type: Optional[_type_system.Type] + _var_info: _VarInfo _value: Optional[_value_prop.PropValue] - _op: "Node" - _name: Optional[str] _operator_dispatcher: ClassVar[Any] = NotImplementedOperatorDispatcher() def __init__( self, - op: "Node", - type_: Optional[_type_system.Type], + var_info: _VarInfo, value: Optional[_value_prop.PropValue] = None, ): """The initializer of ``Var`` is protected. Use operator constructors to construct them instead.""" - if type_ is not None and not isinstance(type_, _type_system.Type): - raise TypeError("The type field of a Var must be a Spox Type.") if value is not None and not isinstance(value, _value_prop.PropValue): raise TypeError("The propagated value field of a Var must be a PropValue.") - if value is not None and value.type != type_: + if value is not None and value.type != var_info.type: raise ValueError( - f"The propagated value type ({value.type}) and actual Var type ({type_}) must be the same." + f"The propagated value type ({value.type}) and actual Var type ({var_info.type}) must be the same." ) - self.type = type_ + self._var_info = var_info self._value = value - self._op = op - self._name = None - - def _rename(self, name: Optional[str]): - """Mutates the internal state of the Var, overriding its name as given.""" - self._name = name - - @property - def _which_output(self) -> Optional[str]: - """Return the name of the output field that this var is stored in under ``self._op``.""" - if self._op is None: - return None - op_outs = self._op.outputs.get_vars() - candidates = [key for key, var in op_outs.items() if var is self] - return candidates[0] if candidates else None def _get_value(self) -> "_value_prop.ORTValue": """Get the propagated value in this Var and convert it to the ORT format. Raises if value is missing.""" @@ -135,6 +205,25 @@ def unwrap_optional(self) -> _type_system.Optional: """Equivalent to ``self.unwrap_type().unwrap_optional()``.""" return self.unwrap_type().unwrap_optional() + @property + def _op(self): + return self._var_info._op + + @property + def _name(self): + return self._var_info._name + + def _rename(self, name: Optional[str]): + self._var_info._rename(name) + + @property + def _which_output(self): + return self._var_info._which_output + + @property + def type(self): + return self._var_info.type + def __copy__(self) -> "Var": # Simply return `self` to ensure that "copies" are still equal # during the build process @@ -198,15 +287,97 @@ def __rxor__(self, other) -> "Var": return Var._operator_dispatcher.xor(other, self) +# we want unwrap to be type aware +T = TypeVar("T") + + +@overload +def wrap_vars(var_info: _VarInfo) -> Var: ... + + +@overload +def wrap_vars(var_info: Optional[_VarInfo]) -> Optional[Var]: ... + + +@overload +def wrap_vars(var_info: dict[T, _VarInfo]) -> dict[T, Var]: ... # type: ignore[misc] + + +@overload +def wrap_vars(var_info: Union[Sequence[_VarInfo], Iterable[_VarInfo]]) -> list[Var]: ... 
+ + +def wrap_vars(var_info): + if var_info is None: + return None + elif isinstance(var_info, _VarInfo): + return Var(var_info) + elif isinstance(var_info, dict): + return {k: wrap_vars(v) for k, v in var_info.items()} + elif isinstance(var_info, (Iterable)): + return [wrap_vars(v) for v in var_info] + else: + raise ValueError("Unsupported type for wrap_vars") + + +@overload +def unwrap_vars(var: Var) -> _VarInfo: ... + + +@overload +def unwrap_vars(var: Optional[Var]) -> Optional[_VarInfo]: ... + + +@overload +def unwrap_vars(var: dict[T, Var]) -> dict[T, _VarInfo]: ... # type: ignore[misc] + + +@overload +def unwrap_vars(var: Union[Iterable[Var]]) -> list[_VarInfo]: ... + + +def unwrap_vars(var): + if var is None: + return None + elif isinstance(var, Var): + return var._var_info + elif isinstance(var, dict): + return {k: unwrap_vars(v) for k, v in var.items()} + elif isinstance(var, Iterable): + return [unwrap_vars(v) for v in var] + else: + raise ValueError("Unsupported type for unwrap_vars") + + def result_type( - *types: Union[Var, np.generic, int, float], + *types: Union[_VarInfo, np.generic, int, float], ) -> type[np.generic]: """Promote type for all given element types/values using ``np.result_type``.""" return np.dtype( np.result_type( *( - typ.unwrap_tensor().dtype if isinstance(typ, Var) else typ + typ.unwrap_tensor().dtype + if isinstance(typ, Var) or isinstance(typ, _VarInfo) + else typ for typ in types ) ) ).type + + +def create_prop_dict( + **kwargs: Union[Var, Sequence[Var], Optional[Var]], +) -> _value_prop.PropDict: + from ._fields import BaseVars + + flattened_vars = BaseVars(kwargs).flatten_vars() + + return { + key: ( + var._value + if isinstance(var, Var) + else {k: v._value for k, v in var.items()} + ) + for key, var in flattened_vars.items() + if var is not None + } diff --git a/src/spox/opset/ai/onnx/ml/v3.py b/src/spox/opset/ai/onnx/ml/v3.py index b34d4020..25c388ca 100644 --- a/src/spox/opset/ai/onnx/ml/v3.py +++ b/src/spox/opset/ai/onnx/ml/v3.py @@ -23,7 +23,8 @@ from spox._node import OpType from spox._standard import InferenceError, StandardNode from spox._type_system import Tensor, Type -from spox._var import Var +from spox._value_prop import PropDict +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars class _ArrayFeatureExtractor(StandardNode): @@ -33,14 +34,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} xt, yt = self.inputs.X.unwrap_tensor(), self.inputs.Y.unwrap_tensor() @@ -69,13 +70,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: return {"Y": self.inputs.X.type} if self.inputs.X.type is not None else {} op_type = OpType("Binarizer", "ai.onnx.ml", 1) @@ -94,11 +95,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("CastMap", "ai.onnx.ml", 1) @@ -117,13 +118,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass 
class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} cats1, cats2 = self.attrs.cats_int64s, self.attrs.cats_strings @@ -150,11 +151,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("DictVectorizer", "ai.onnx.ml", 1) @@ -170,11 +171,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Sequence[Var] + X: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("FeatureVectorizer", "ai.onnx.ml", 1) @@ -193,13 +194,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} t = self.inputs.X.unwrap_tensor() @@ -256,11 +257,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LabelEncoder", "ai.onnx.ml", 2) @@ -281,12 +282,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo op_type = OpType("LinearClassifier", "ai.onnx.ml", 1) @@ -305,13 +306,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} sim = self.inputs.X.unwrap_tensor().shape @@ -339,13 +340,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if self.attrs.norm.value not in ("MAX", "L1", "L2"): raise InferenceError( f"Unknown normalisation method `{self.attrs.norm.value}`" @@ -368,13 +369,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} if self.attrs.cats_int64s: @@ -412,12 +413,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo op_type = OpType("SVMClassifier", "ai.onnx.ml", 1) @@ -440,11 +441,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("SVMRegressor", "ai.onnx.ml", 1) @@ -461,13 +462,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, 
Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if self.inputs.X.type is None: return {} sc, off = self.attrs.scale, self.attrs.offset @@ -520,14 +521,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: e = ( len(self.attrs.class_ids.value) if self.attrs.class_ids is not None @@ -585,13 +586,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if self.inputs.fully_typed: shape = self.inputs.X.unwrap_tensor().shape assert shape is not None # already checked with fully_typed @@ -619,11 +620,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("ZipMap", "ai.onnx.ml", 1) @@ -662,13 +663,21 @@ def array_feature_extractor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return _ArrayFeatureExtractor( - _ArrayFeatureExtractor.Attributes(), - _ArrayFeatureExtractor.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + return ( + _ArrayFeatureExtractor( + _ArrayFeatureExtractor.Attributes(), + _ArrayFeatureExtractor.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) def binarizer( @@ -702,14 +711,21 @@ def binarizer( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Binarizer( - _Binarizer.Attributes( - threshold=AttrFloat32(threshold, name="threshold"), - ), - _Binarizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Binarizer( + _Binarizer.Attributes( + threshold=AttrFloat32(threshold, name="threshold"), + ), + _Binarizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def cast_map( @@ -759,16 +775,23 @@ def cast_map( - T1: `map(int64,tensor(float))`, `map(int64,tensor(string))` - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return _CastMap( - _CastMap.Attributes( - cast_to=AttrString(cast_to, name="cast_to"), - map_form=AttrString(map_form, name="map_form"), - max_map=AttrInt64(max_map, name="max_map"), - ), - _CastMap.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _CastMap( + _CastMap.Attributes( + cast_to=AttrString(cast_to, name="cast_to"), + map_form=AttrString(map_form, name="map_form"), + max_map=AttrInt64(max_map, name="max_map"), + ), + _CastMap.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def category_mapper( @@ -826,17 +849,24 @@ def category_mapper( - T1: `tensor(int64)`, `tensor(string)` - T2: `tensor(int64)`, `tensor(string)` """ - return _CategoryMapper( - _CategoryMapper.Attributes( - cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), - cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), - default_int64=AttrInt64(default_int64, 
name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - ), - _CategoryMapper.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _CategoryMapper( + _CategoryMapper.Attributes( + cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), + cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + ), + _CategoryMapper.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def dict_vectorizer( @@ -889,19 +919,26 @@ def dict_vectorizer( - T1: `map(int64,tensor(double))`, `map(int64,tensor(float))`, `map(int64,tensor(string))`, `map(string,tensor(double))`, `map(string,tensor(float))`, `map(string,tensor(int64))` - T2: `tensor(double)`, `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return _DictVectorizer( - _DictVectorizer.Attributes( - int64_vocabulary=AttrInt64s.maybe( - int64_vocabulary, name="int64_vocabulary" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _DictVectorizer( + _DictVectorizer.Attributes( + int64_vocabulary=AttrInt64s.maybe( + int64_vocabulary, name="int64_vocabulary" + ), + string_vocabulary=AttrStrings.maybe( + string_vocabulary, name="string_vocabulary" + ), ), - string_vocabulary=AttrStrings.maybe( - string_vocabulary, name="string_vocabulary" + _DictVectorizer.Inputs( + X=unwrap_vars(X), ), - ), - _DictVectorizer.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def feature_vectorizer( @@ -938,14 +975,23 @@ def feature_vectorizer( Type constraints: - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _FeatureVectorizer( - _FeatureVectorizer.Attributes( - inputdimensions=AttrInt64s.maybe(inputdimensions, name="inputdimensions"), - ), - _FeatureVectorizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _FeatureVectorizer( + _FeatureVectorizer.Attributes( + inputdimensions=AttrInt64s.maybe( + inputdimensions, name="inputdimensions" + ), + ), + _FeatureVectorizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def imputer( @@ -1003,25 +1049,32 @@ def imputer( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Imputer( - _Imputer.Attributes( - imputed_value_floats=AttrFloat32s.maybe( - imputed_value_floats, name="imputed_value_floats" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Imputer( + _Imputer.Attributes( + imputed_value_floats=AttrFloat32s.maybe( + imputed_value_floats, name="imputed_value_floats" + ), + imputed_value_int64s=AttrInt64s.maybe( + imputed_value_int64s, name="imputed_value_int64s" + ), + replaced_value_float=AttrFloat32( + replaced_value_float, name="replaced_value_float" + ), + replaced_value_int64=AttrInt64( + replaced_value_int64, name="replaced_value_int64" + ), ), - imputed_value_int64s=AttrInt64s.maybe( - imputed_value_int64s, name="imputed_value_int64s" + _Imputer.Inputs( + X=unwrap_vars(X), ), - replaced_value_float=AttrFloat32( - replaced_value_float, name="replaced_value_float" - ), - replaced_value_int64=AttrInt64( - replaced_value_int64, name="replaced_value_int64" - ), - ), - _Imputer.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) 
def label_encoder( @@ -1104,22 +1157,29 @@ def label_encoder( - T1: `tensor(float)`, `tensor(int64)`, `tensor(string)` - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return _LabelEncoder( - _LabelEncoder.Attributes( - default_float=AttrFloat32(default_float, name="default_float"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), - keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), - keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), - values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), - values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), - values_strings=AttrStrings.maybe(values_strings, name="values_strings"), - ), - _LabelEncoder.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LabelEncoder( + _LabelEncoder.Attributes( + default_float=AttrFloat32(default_float, name="default_float"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), + keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), + keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), + values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), + values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), + values_strings=AttrStrings.maybe(values_strings, name="values_strings"), + ), + _LabelEncoder.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def linear_classifier( @@ -1179,23 +1239,30 @@ def linear_classifier( - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - T2: `tensor(int64)`, `tensor(string)` """ - return _LinearClassifier( - _LinearClassifier.Attributes( - classlabels_ints=AttrInt64s.maybe( - classlabels_ints, name="classlabels_ints" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LinearClassifier( + _LinearClassifier.Attributes( + classlabels_ints=AttrInt64s.maybe( + classlabels_ints, name="classlabels_ints" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), + coefficients=AttrFloat32s(coefficients, name="coefficients"), + intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), + multi_class=AttrInt64(multi_class, name="multi_class"), + post_transform=AttrString(post_transform, name="post_transform"), ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" + _LinearClassifier.Inputs( + X=unwrap_vars(X), ), - coefficients=AttrFloat32s(coefficients, name="coefficients"), - intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), - multi_class=AttrInt64(multi_class, name="multi_class"), - post_transform=AttrString(post_transform, name="post_transform"), - ), - _LinearClassifier.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def linear_regressor( @@ -1247,17 +1314,24 @@ def linear_regressor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _LinearRegressor( - _LinearRegressor.Attributes( - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), - 
post_transform=AttrString(post_transform, name="post_transform"), - targets=AttrInt64(targets, name="targets"), - ), - _LinearRegressor.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LinearRegressor( + _LinearRegressor.Attributes( + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), + post_transform=AttrString(post_transform, name="post_transform"), + targets=AttrInt64(targets, name="targets"), + ), + _LinearRegressor.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def normalizer( @@ -1296,14 +1370,21 @@ def normalizer( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Normalizer( - _Normalizer.Attributes( - norm=AttrString(norm, name="norm"), - ), - _Normalizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Normalizer( + _Normalizer.Attributes( + norm=AttrString(norm, name="norm"), + ), + _Normalizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def one_hot_encoder( @@ -1355,16 +1436,23 @@ def one_hot_encoder( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return _OneHotEncoder( - _OneHotEncoder.Attributes( - cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), - cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), - zeros=AttrInt64(zeros, name="zeros"), - ), - _OneHotEncoder.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _OneHotEncoder( + _OneHotEncoder.Attributes( + cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), + cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), + zeros=AttrInt64(zeros, name="zeros"), + ), + _OneHotEncoder.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def svmclassifier( @@ -1449,30 +1537,39 @@ def svmclassifier( - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - T2: `tensor(int64)`, `tensor(string)` """ - return _SVMClassifier( - _SVMClassifier.Attributes( - classlabels_ints=AttrInt64s.maybe( - classlabels_ints, name="classlabels_ints" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _SVMClassifier( + _SVMClassifier.Attributes( + classlabels_ints=AttrInt64s.maybe( + classlabels_ints, name="classlabels_ints" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), + kernel_type=AttrString(kernel_type, name="kernel_type"), + post_transform=AttrString(post_transform, name="post_transform"), + prob_a=AttrFloat32s.maybe(prob_a, name="prob_a"), + prob_b=AttrFloat32s.maybe(prob_b, name="prob_b"), + rho=AttrFloat32s.maybe(rho, name="rho"), + support_vectors=AttrFloat32s.maybe( + support_vectors, name="support_vectors" + ), + vectors_per_class=AttrInt64s.maybe( + vectors_per_class, name="vectors_per_class" + ), ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" + _SVMClassifier.Inputs( + X=unwrap_vars(X), ), - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - kernel_params=AttrFloat32s.maybe(kernel_params, 
name="kernel_params"), - kernel_type=AttrString(kernel_type, name="kernel_type"), - post_transform=AttrString(post_transform, name="post_transform"), - prob_a=AttrFloat32s.maybe(prob_a, name="prob_a"), - prob_b=AttrFloat32s.maybe(prob_b, name="prob_b"), - rho=AttrFloat32s.maybe(rho, name="rho"), - support_vectors=AttrFloat32s.maybe(support_vectors, name="support_vectors"), - vectors_per_class=AttrInt64s.maybe( - vectors_per_class, name="vectors_per_class" - ), - ), - _SVMClassifier.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def svmregressor( @@ -1536,21 +1633,30 @@ def svmregressor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _SVMRegressor( - _SVMRegressor.Attributes( - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), - kernel_type=AttrString(kernel_type, name="kernel_type"), - n_supports=AttrInt64(n_supports, name="n_supports"), - one_class=AttrInt64(one_class, name="one_class"), - post_transform=AttrString(post_transform, name="post_transform"), - rho=AttrFloat32s.maybe(rho, name="rho"), - support_vectors=AttrFloat32s.maybe(support_vectors, name="support_vectors"), - ), - _SVMRegressor.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _SVMRegressor( + _SVMRegressor.Attributes( + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), + kernel_type=AttrString(kernel_type, name="kernel_type"), + n_supports=AttrInt64(n_supports, name="n_supports"), + one_class=AttrInt64(one_class, name="one_class"), + post_transform=AttrString(post_transform, name="post_transform"), + rho=AttrFloat32s.maybe(rho, name="rho"), + support_vectors=AttrFloat32s.maybe( + support_vectors, name="support_vectors" + ), + ), + _SVMRegressor.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def scaler( @@ -1592,15 +1698,22 @@ def scaler( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Scaler( - _Scaler.Attributes( - offset=AttrFloat32s.maybe(offset, name="offset"), - scale=AttrFloat32s.maybe(scale, name="scale"), - ), - _Scaler.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Scaler( + _Scaler.Attributes( + offset=AttrFloat32s.maybe(offset, name="offset"), + scale=AttrFloat32s.maybe(scale, name="scale"), + ), + _Scaler.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def tree_ensemble_classifier( @@ -1737,54 +1850,64 @@ def tree_ensemble_classifier( - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - T2: `tensor(int64)`, `tensor(string)` """ - return _TreeEnsembleClassifier( - _TreeEnsembleClassifier.Attributes( - base_values=AttrFloat32s.maybe(base_values, name="base_values"), - base_values_as_tensor=AttrTensor.maybe( - base_values_as_tensor, name="base_values_as_tensor" - ), - class_ids=AttrInt64s.maybe(class_ids, name="class_ids"), - class_nodeids=AttrInt64s.maybe(class_nodeids, name="class_nodeids"), - class_treeids=AttrInt64s.maybe(class_treeids, name="class_treeids"), - class_weights=AttrFloat32s.maybe(class_weights, name="class_weights"), - class_weights_as_tensor=AttrTensor.maybe( - class_weights_as_tensor, 
name="class_weights_as_tensor" - ), - classlabels_int64s=AttrInt64s.maybe( - classlabels_int64s, name="classlabels_int64s" - ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" - ), - nodes_falsenodeids=AttrInt64s.maybe( - nodes_falsenodeids, name="nodes_falsenodeids" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _TreeEnsembleClassifier( + _TreeEnsembleClassifier.Attributes( + base_values=AttrFloat32s.maybe(base_values, name="base_values"), + base_values_as_tensor=AttrTensor.maybe( + base_values_as_tensor, name="base_values_as_tensor" + ), + class_ids=AttrInt64s.maybe(class_ids, name="class_ids"), + class_nodeids=AttrInt64s.maybe(class_nodeids, name="class_nodeids"), + class_treeids=AttrInt64s.maybe(class_treeids, name="class_treeids"), + class_weights=AttrFloat32s.maybe(class_weights, name="class_weights"), + class_weights_as_tensor=AttrTensor.maybe( + class_weights_as_tensor, name="class_weights_as_tensor" + ), + classlabels_int64s=AttrInt64s.maybe( + classlabels_int64s, name="classlabels_int64s" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), + nodes_falsenodeids=AttrInt64s.maybe( + nodes_falsenodeids, name="nodes_falsenodeids" + ), + nodes_featureids=AttrInt64s.maybe( + nodes_featureids, name="nodes_featureids" + ), + nodes_hitrates=AttrFloat32s.maybe( + nodes_hitrates, name="nodes_hitrates" + ), + nodes_hitrates_as_tensor=AttrTensor.maybe( + nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" + ), + nodes_missing_value_tracks_true=AttrInt64s.maybe( + nodes_missing_value_tracks_true, + name="nodes_missing_value_tracks_true", + ), + nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), + nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), + nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), + nodes_truenodeids=AttrInt64s.maybe( + nodes_truenodeids, name="nodes_truenodeids" + ), + nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), + nodes_values_as_tensor=AttrTensor.maybe( + nodes_values_as_tensor, name="nodes_values_as_tensor" + ), + post_transform=AttrString(post_transform, name="post_transform"), ), - nodes_featureids=AttrInt64s.maybe( - nodes_featureids, name="nodes_featureids" + _TreeEnsembleClassifier.Inputs( + X=unwrap_vars(X), ), - nodes_hitrates=AttrFloat32s.maybe(nodes_hitrates, name="nodes_hitrates"), - nodes_hitrates_as_tensor=AttrTensor.maybe( - nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" - ), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true" - ), - nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), - nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), - nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), - nodes_truenodeids=AttrInt64s.maybe( - nodes_truenodeids, name="nodes_truenodeids" - ), - nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), - nodes_values_as_tensor=AttrTensor.maybe( - nodes_values_as_tensor, name="nodes_values_as_tensor" - ), - post_transform=AttrString(post_transform, name="post_transform"), - ), - _TreeEnsembleClassifier.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def tree_ensemble_regressor( @@ -1919,52 +2042,64 @@ def tree_ensemble_regressor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` 
""" - return _TreeEnsembleRegressor( - _TreeEnsembleRegressor.Attributes( - aggregate_function=AttrString( - aggregate_function, name="aggregate_function" - ), - base_values=AttrFloat32s.maybe(base_values, name="base_values"), - base_values_as_tensor=AttrTensor.maybe( - base_values_as_tensor, name="base_values_as_tensor" - ), - n_targets=AttrInt64.maybe(n_targets, name="n_targets"), - nodes_falsenodeids=AttrInt64s.maybe( - nodes_falsenodeids, name="nodes_falsenodeids" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _TreeEnsembleRegressor( + _TreeEnsembleRegressor.Attributes( + aggregate_function=AttrString( + aggregate_function, name="aggregate_function" + ), + base_values=AttrFloat32s.maybe(base_values, name="base_values"), + base_values_as_tensor=AttrTensor.maybe( + base_values_as_tensor, name="base_values_as_tensor" + ), + n_targets=AttrInt64.maybe(n_targets, name="n_targets"), + nodes_falsenodeids=AttrInt64s.maybe( + nodes_falsenodeids, name="nodes_falsenodeids" + ), + nodes_featureids=AttrInt64s.maybe( + nodes_featureids, name="nodes_featureids" + ), + nodes_hitrates=AttrFloat32s.maybe( + nodes_hitrates, name="nodes_hitrates" + ), + nodes_hitrates_as_tensor=AttrTensor.maybe( + nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" + ), + nodes_missing_value_tracks_true=AttrInt64s.maybe( + nodes_missing_value_tracks_true, + name="nodes_missing_value_tracks_true", + ), + nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), + nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), + nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), + nodes_truenodeids=AttrInt64s.maybe( + nodes_truenodeids, name="nodes_truenodeids" + ), + nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), + nodes_values_as_tensor=AttrTensor.maybe( + nodes_values_as_tensor, name="nodes_values_as_tensor" + ), + post_transform=AttrString(post_transform, name="post_transform"), + target_ids=AttrInt64s.maybe(target_ids, name="target_ids"), + target_nodeids=AttrInt64s.maybe(target_nodeids, name="target_nodeids"), + target_treeids=AttrInt64s.maybe(target_treeids, name="target_treeids"), + target_weights=AttrFloat32s.maybe( + target_weights, name="target_weights" + ), + target_weights_as_tensor=AttrTensor.maybe( + target_weights_as_tensor, name="target_weights_as_tensor" + ), ), - nodes_featureids=AttrInt64s.maybe( - nodes_featureids, name="nodes_featureids" + _TreeEnsembleRegressor.Inputs( + X=unwrap_vars(X), ), - nodes_hitrates=AttrFloat32s.maybe(nodes_hitrates, name="nodes_hitrates"), - nodes_hitrates_as_tensor=AttrTensor.maybe( - nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" - ), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true" - ), - nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), - nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), - nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), - nodes_truenodeids=AttrInt64s.maybe( - nodes_truenodeids, name="nodes_truenodeids" - ), - nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), - nodes_values_as_tensor=AttrTensor.maybe( - nodes_values_as_tensor, name="nodes_values_as_tensor" - ), - post_transform=AttrString(post_transform, name="post_transform"), - target_ids=AttrInt64s.maybe(target_ids, name="target_ids"), - target_nodeids=AttrInt64s.maybe(target_nodeids, name="target_nodeids"), - target_treeids=AttrInt64s.maybe(target_treeids, 
name="target_treeids"), - target_weights=AttrFloat32s.maybe(target_weights, name="target_weights"), - target_weights_as_tensor=AttrTensor.maybe( - target_weights_as_tensor, name="target_weights_as_tensor" - ), - ), - _TreeEnsembleRegressor.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def zip_map( @@ -2007,19 +2142,26 @@ def zip_map( Type constraints: - T: `seq(map(int64,tensor(float)))`, `seq(map(string,tensor(float)))` """ - return _ZipMap( - _ZipMap.Attributes( - classlabels_int64s=AttrInt64s.maybe( - classlabels_int64s, name="classlabels_int64s" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _ZipMap( + _ZipMap.Attributes( + classlabels_int64s=AttrInt64s.maybe( + classlabels_int64s, name="classlabels_int64s" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" + _ZipMap.Inputs( + X=unwrap_vars(X), ), - ), - _ZipMap.Inputs( - X=X, - ), - ).outputs.Z + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) _OPERATORS = { diff --git a/src/spox/opset/ai/onnx/ml/v4.py b/src/spox/opset/ai/onnx/ml/v4.py index 9e51382c..9f9fdf90 100644 --- a/src/spox/opset/ai/onnx/ml/v4.py +++ b/src/spox/opset/ai/onnx/ml/v4.py @@ -22,7 +22,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.ml.v3 import ( _ArrayFeatureExtractor, _Binarizer, @@ -79,11 +79,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LabelEncoder", "ai.onnx.ml", 4) @@ -191,25 +191,32 @@ def label_encoder( - T1: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` - T2: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return _LabelEncoder( - _LabelEncoder.Attributes( - default_float=AttrFloat32(default_float, name="default_float"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"), - keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), - keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), - keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), - keys_tensor=AttrTensor.maybe(keys_tensor, name="keys_tensor"), - values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), - values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), - values_strings=AttrStrings.maybe(values_strings, name="values_strings"), - values_tensor=AttrTensor.maybe(values_tensor, name="values_tensor"), - ), - _LabelEncoder.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LabelEncoder( + _LabelEncoder.Attributes( + default_float=AttrFloat32(default_float, name="default_float"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"), + keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), + 
keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), + keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), + keys_tensor=AttrTensor.maybe(keys_tensor, name="keys_tensor"), + values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), + values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), + values_strings=AttrStrings.maybe(values_strings, name="values_strings"), + values_tensor=AttrTensor.maybe(values_tensor, name="values_tensor"), + ), + _LabelEncoder.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) _OPERATORS = { diff --git a/src/spox/opset/ai/onnx/ml/v5.py b/src/spox/opset/ai/onnx/ml/v5.py index 100bf179..4b073e70 100644 --- a/src/spox/opset/ai/onnx/ml/v5.py +++ b/src/spox/opset/ai/onnx/ml/v5.py @@ -18,7 +18,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.ml.v4 import ( _ArrayFeatureExtractor, _Binarizer, @@ -77,11 +77,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("TreeEnsemble", "ai.onnx.ml", 5) @@ -224,35 +224,47 @@ def tree_ensemble( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _TreeEnsemble( - _TreeEnsemble.Attributes( - aggregate_function=AttrInt64(aggregate_function, name="aggregate_function"), - leaf_targetids=AttrInt64s(leaf_targetids, name="leaf_targetids"), - leaf_weights=AttrTensor(leaf_weights, name="leaf_weights"), - membership_values=AttrTensor.maybe( - membership_values, name="membership_values" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _TreeEnsemble( + _TreeEnsemble.Attributes( + aggregate_function=AttrInt64( + aggregate_function, name="aggregate_function" + ), + leaf_targetids=AttrInt64s(leaf_targetids, name="leaf_targetids"), + leaf_weights=AttrTensor(leaf_weights, name="leaf_weights"), + membership_values=AttrTensor.maybe( + membership_values, name="membership_values" + ), + n_targets=AttrInt64.maybe(n_targets, name="n_targets"), + nodes_falseleafs=AttrInt64s(nodes_falseleafs, name="nodes_falseleafs"), + nodes_falsenodeids=AttrInt64s( + nodes_falsenodeids, name="nodes_falsenodeids" + ), + nodes_featureids=AttrInt64s(nodes_featureids, name="nodes_featureids"), + nodes_hitrates=AttrTensor.maybe(nodes_hitrates, name="nodes_hitrates"), + nodes_missing_value_tracks_true=AttrInt64s.maybe( + nodes_missing_value_tracks_true, + name="nodes_missing_value_tracks_true", + ), + nodes_modes=AttrTensor(nodes_modes, name="nodes_modes"), + nodes_splits=AttrTensor(nodes_splits, name="nodes_splits"), + nodes_trueleafs=AttrInt64s(nodes_trueleafs, name="nodes_trueleafs"), + nodes_truenodeids=AttrInt64s( + nodes_truenodeids, name="nodes_truenodeids" + ), + post_transform=AttrInt64(post_transform, name="post_transform"), + tree_roots=AttrInt64s(tree_roots, name="tree_roots"), ), - n_targets=AttrInt64.maybe(n_targets, name="n_targets"), - nodes_falseleafs=AttrInt64s(nodes_falseleafs, name="nodes_falseleafs"), - nodes_falsenodeids=AttrInt64s( - nodes_falsenodeids, name="nodes_falsenodeids" + _TreeEnsemble.Inputs( + X=unwrap_vars(X), ), - nodes_featureids=AttrInt64s(nodes_featureids, name="nodes_featureids"), - nodes_hitrates=AttrTensor.maybe(nodes_hitrates, name="nodes_hitrates"), - 
nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true" - ), - nodes_modes=AttrTensor(nodes_modes, name="nodes_modes"), - nodes_splits=AttrTensor(nodes_splits, name="nodes_splits"), - nodes_trueleafs=AttrInt64s(nodes_trueleafs, name="nodes_trueleafs"), - nodes_truenodeids=AttrInt64s(nodes_truenodeids, name="nodes_truenodeids"), - post_transform=AttrInt64(post_transform, name="post_transform"), - tree_roots=AttrInt64s(tree_roots, name="tree_roots"), - ), - _TreeEnsemble.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) _OPERATORS = { diff --git a/src/spox/opset/ai/onnx/v17.py b/src/spox/opset/ai/onnx/v17.py index f9cbd0bf..036c6f75 100644 --- a/src/spox/opset/ai/onnx/v17.py +++ b/src/spox/opset/ai/onnx/v17.py @@ -31,8 +31,13 @@ from spox._standard import InferenceError, StandardNode from spox._type_system import Sequence as SpoxSequence from spox._type_system import Tensor, Type -from spox._value_prop import PropValueType -from spox._var import Var +from spox._value_prop import PropDict, PropValueType +from spox._var import ( + Var, + _VarInfo, + create_prop_dict, + unwrap_vars, +) class _Abs(StandardNode): @@ -42,11 +47,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Abs", "", 13) @@ -62,11 +67,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Acos", "", 7) @@ -82,11 +87,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Acosh", "", 9) @@ -102,12 +107,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Add", "", 14) @@ -123,12 +128,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("And", "", 7) @@ -146,11 +151,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ArgMax", "", 13) @@ -168,11 +173,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ArgMin", "", 13) @@ -188,11 +193,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Asin", "", 7) @@ -208,11 +213,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Asinh", "", 9) @@ -228,11 +233,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Atan", "", 7) @@ -248,11 +253,11 @@ class 
Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Atanh", "", 9) @@ -273,11 +278,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("AveragePool", "", 11) @@ -295,17 +300,17 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - scale: Var - B: Var - input_mean: Var - input_var: Var + X: _VarInfo + scale: _VarInfo + B: _VarInfo + input_mean: _VarInfo + input_var: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - running_mean: Optional[Var] - running_var: Optional[Var] + Y: _VarInfo + running_mean: Optional[_VarInfo] + running_var: Optional[_VarInfo] op_type = OpType("BatchNormalization", "", 15) @@ -322,11 +327,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Bernoulli", "", 15) @@ -342,12 +347,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("BitShift", "", 11) @@ -364,11 +369,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - size: Var + size: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("BlackmanWindow", "", 17) @@ -384,11 +389,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cast", "", 13) @@ -404,12 +409,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target_type: Var + input: _VarInfo + target_type: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("CastLike", "", 15) @@ -425,11 +430,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Ceil", "", 13) @@ -445,11 +450,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Celu", "", 12) @@ -465,13 +470,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - min: Optional[Var] - max: Optional[Var] + input: _VarInfo + min: Optional[_VarInfo] + max: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Clip", "", 13) @@ -487,15 +492,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - condition: Var + input: _VarInfo + condition: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def infer_output_types(self) -> dict[str, Type]: - self.infer_output_types_onnx() + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: + self.infer_output_types_onnx(input_prop_values) inp, cond = ( self.inputs.input.unwrap_tensor(), self.inputs.condition.unwrap_tensor(), @@ -534,11 +539,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] 
@dataclass class Outputs(BaseOutputs): - concat_result: Var + concat_result: _VarInfo op_type = OpType("Concat", "", 13) @@ -555,11 +560,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var + input_sequence: _VarInfo @dataclass class Outputs(BaseOutputs): - concat_result: Var + concat_result: _VarInfo op_type = OpType("ConcatFromSequence", "", 11) @@ -583,9 +588,9 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: ((key, raw),) = ( (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None ) @@ -625,11 +630,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ConstantOfShape", "", 9) @@ -650,13 +655,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - B: Optional[Var] + X: _VarInfo + W: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Conv", "", 11) @@ -677,14 +682,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - w: Var - x_zero_point: Optional[Var] - w_zero_point: Optional[Var] + x: _VarInfo + w: _VarInfo + x_zero_point: Optional[_VarInfo] + w_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("ConvInteger", "", 10) @@ -707,13 +712,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - B: Optional[Var] + X: _VarInfo + W: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("ConvTranspose", "", 11) @@ -729,11 +734,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cos", "", 7) @@ -749,11 +754,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cosh", "", 9) @@ -770,12 +775,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - axis: Var + x: _VarInfo + axis: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("CumSum", "", 14) @@ -793,12 +798,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - dft_length: Optional[Var] + input: _VarInfo + dft_length: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("DFT", "", 17) @@ -815,11 +820,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("DepthToSpace", "", 13) @@ -835,13 +840,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("DequantizeLinear", "", 13) @@ -857,11 +862,11 @@ class Attributes(BaseAttributes): @dataclass class 
Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Det", "", 11) @@ -877,12 +882,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Div", "", 14) @@ -898,14 +903,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - ratio: Optional[Var] - training_mode: Optional[Var] + data: _VarInfo + ratio: Optional[_VarInfo] + training_mode: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var - mask: Optional[Var] + output: _VarInfo + mask: Optional[_VarInfo] op_type = OpType("Dropout", "", 13) @@ -921,13 +926,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var + x: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var - y_scale: Var - y_zero_point: Var + y: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo op_type = OpType("DynamicQuantizeLinear", "", 11) @@ -943,11 +948,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - Inputs: Sequence[Var] + Inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - Output: Var + Output: _VarInfo op_type = OpType("Einsum", "", 12) @@ -963,11 +968,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Elu", "", 6) @@ -983,12 +988,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Equal", "", 13) @@ -1004,11 +1009,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Erf", "", 13) @@ -1024,11 +1029,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Exp", "", 13) @@ -1044,12 +1049,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - shape: Var + input: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Expand", "", 13) @@ -1066,11 +1071,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("EyeLike", "", 9) @@ -1086,11 +1091,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Flatten", "", 13) @@ -1106,11 +1111,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Floor", "", 13) @@ -1133,17 +1138,17 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - R: Var - B: Optional[Var] - sequence_lens: Optional[Var] - initial_h: Optional[Var] + X: _VarInfo + W: _VarInfo + R: _VarInfo + B: Optional[_VarInfo] + sequence_lens: Optional[_VarInfo] + initial_h: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: 
Optional[Var] - Y_h: Optional[Var] + Y: Optional[_VarInfo] + Y_h: Optional[_VarInfo] op_type = OpType("GRU", "", 14) @@ -1159,12 +1164,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var + data: _VarInfo + indices: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Gather", "", 13) @@ -1180,12 +1185,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var + data: _VarInfo + indices: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("GatherElements", "", 13) @@ -1201,12 +1206,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var + data: _VarInfo + indices: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("GatherND", "", 13) @@ -1225,13 +1230,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var - C: Optional[Var] + A: _VarInfo + B: _VarInfo + C: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Gemm", "", 13) @@ -1247,11 +1252,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GlobalAveragePool", "", 1) @@ -1267,11 +1272,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GlobalLpPool", "", 2) @@ -1287,11 +1292,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GlobalMaxPool", "", 1) @@ -1307,12 +1312,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Greater", "", 13) @@ -1328,12 +1333,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("GreaterOrEqual", "", 16) @@ -1351,12 +1356,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - grid: Var + X: _VarInfo + grid: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GridSample", "", 16) @@ -1373,11 +1378,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - size: Var + size: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("HammingWindow", "", 17) @@ -1394,11 +1399,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - size: Var + size: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("HannWindow", "", 17) @@ -1415,11 +1420,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("HardSigmoid", "", 6) @@ -1435,11 +1440,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("HardSwish", "", 14) @@ -1455,11 +1460,11 @@ class 
Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Hardmax", "", 13) @@ -1475,11 +1480,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Identity", "", 16) @@ -1496,11 +1501,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - cond: Var + cond: _VarInfo @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("If", "", 16) @@ -1516,13 +1521,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - scale: Var - B: Var + input: _VarInfo + scale: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("InstanceNormalization", "", 6) @@ -1539,11 +1544,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsInf", "", 10) @@ -1559,11 +1564,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsNaN", "", 13) @@ -1582,11 +1587,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LRN", "", 13) @@ -1609,20 +1614,20 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - R: Var - B: Optional[Var] - sequence_lens: Optional[Var] - initial_h: Optional[Var] - initial_c: Optional[Var] - P: Optional[Var] + X: _VarInfo + W: _VarInfo + R: _VarInfo + B: Optional[_VarInfo] + sequence_lens: Optional[_VarInfo] + initial_h: Optional[_VarInfo] + initial_c: Optional[_VarInfo] + P: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Optional[Var] - Y_h: Optional[Var] - Y_c: Optional[Var] + Y: Optional[_VarInfo] + Y_h: Optional[_VarInfo] + Y_c: Optional[_VarInfo] op_type = OpType("LSTM", "", 14) @@ -1640,15 +1645,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Scale: Var - B: Optional[Var] + X: _VarInfo + Scale: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var - Mean: Optional[Var] - InvStdDev: Optional[Var] + Y: _VarInfo + Mean: Optional[_VarInfo] + InvStdDev: Optional[_VarInfo] op_type = OpType("LayerNormalization", "", 17) @@ -1664,11 +1669,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LeakyRelu", "", 16) @@ -1684,12 +1689,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Less", "", 13) @@ -1705,12 +1710,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("LessOrEqual", "", 16) @@ -1726,11 +1731,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: 
Var + output: _VarInfo op_type = OpType("Log", "", 13) @@ -1746,11 +1751,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("LogSoftmax", "", 13) @@ -1766,21 +1771,21 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - M: Optional[Var] - cond: Optional[Var] - v_initial: Sequence[Var] + M: Optional[_VarInfo] + cond: Optional[_VarInfo] + v_initial: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - v_final_and_scan_outputs: Sequence[Var] + v_final_and_scan_outputs: Sequence[_VarInfo] - def infer_output_types(self) -> dict[str, Type]: - output_types = super().infer_output_types() + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: + output_types = super().infer_output_types({}) body = self.attrs.body.value n = len(body.requested_arguments) - 2 - carried_names = list(self.outputs.get_vars())[:n] + carried_names = list(self.outputs.get_var_infos())[:n] carried_types = [v.type for v in list(body.requested_results.values())[1:][:n]] for name, typ in zip(carried_names, carried_types): @@ -1803,11 +1808,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("LpNormalization", "", 1) @@ -1827,11 +1832,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LpPool", "", 11) @@ -1847,12 +1852,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MatMul", "", 13) @@ -1868,14 +1873,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var - a_zero_point: Optional[Var] - b_zero_point: Optional[Var] + A: _VarInfo + B: _VarInfo + a_zero_point: Optional[_VarInfo] + b_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MatMulInteger", "", 10) @@ -1891,11 +1896,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - max: Var + max: _VarInfo op_type = OpType("Max", "", 13) @@ -1917,12 +1922,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Indices: Optional[Var] + Y: _VarInfo + Indices: Optional[_VarInfo] op_type = OpType("MaxPool", "", 12) @@ -1939,12 +1944,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - rois: Var + X: _VarInfo + rois: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MaxRoiPool", "", 1) @@ -1962,13 +1967,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - I: Var - output_shape: Optional[Var] + X: _VarInfo + I: _VarInfo + output_shape: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("MaxUnpool", "", 11) @@ -1984,11 +1989,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - mean: Var + mean: 
_VarInfo op_type = OpType("Mean", "", 13) @@ -2004,11 +2009,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MeanVarianceNormalization", "", 13) @@ -2024,15 +2029,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - num_mel_bins: Var - dft_length: Var - sample_rate: Var - lower_edge_hertz: Var - upper_edge_hertz: Var + num_mel_bins: _VarInfo + dft_length: _VarInfo + sample_rate: _VarInfo + lower_edge_hertz: _VarInfo + upper_edge_hertz: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("MelWeightMatrix", "", 17) @@ -2048,11 +2053,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - min: Var + min: _VarInfo op_type = OpType("Min", "", 13) @@ -2068,12 +2073,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Mod", "", 13) @@ -2089,12 +2094,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Mul", "", 14) @@ -2112,11 +2117,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Multinomial", "", 7) @@ -2132,11 +2137,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Neg", "", 13) @@ -2153,13 +2158,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target: Var - weight: Optional[Var] + input: _VarInfo + target: _VarInfo + weight: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - loss: Var + loss: _VarInfo op_type = OpType("NegativeLogLikelihoodLoss", "", 13) @@ -2175,15 +2180,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - boxes: Var - scores: Var - max_output_boxes_per_class: Optional[Var] - iou_threshold: Optional[Var] - score_threshold: Optional[Var] + boxes: _VarInfo + scores: _VarInfo + max_output_boxes_per_class: Optional[_VarInfo] + iou_threshold: Optional[_VarInfo] + score_threshold: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - selected_indices: Var + selected_indices: _VarInfo op_type = OpType("NonMaxSuppression", "", 11) @@ -2199,11 +2204,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("NonZero", "", 13) @@ -2219,11 +2224,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Not", "", 1) @@ -2239,13 +2244,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - indices: Var - depth: Var - values: Var + indices: _VarInfo + depth: _VarInfo + values: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OneHot", "", 11) @@ -2261,11 +2266,11 @@ class Attributes(BaseAttributes): @dataclass class 
Inputs(BaseInputs): - input: Optional[Var] + input: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Optional", "", 15) @@ -2281,11 +2286,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalGetElement", "", 15) @@ -2301,11 +2306,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalHasElement", "", 15) @@ -2321,12 +2326,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Or", "", 7) @@ -2342,12 +2347,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - slope: Var + X: _VarInfo + slope: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("PRelu", "", 16) @@ -2363,13 +2368,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 13) @@ -2385,12 +2390,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("Pow", "", 15) @@ -2411,19 +2416,19 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Var - w: Var - w_scale: Var - w_zero_point: Var - y_scale: Var - y_zero_point: Var - B: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: _VarInfo + w: _VarInfo + w_scale: _VarInfo + w_zero_point: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QLinearConv", "", 10) @@ -2439,18 +2444,18 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - a: Var - a_scale: Var - a_zero_point: Var - b: Var - b_scale: Var - b_zero_point: Var - y_scale: Var - y_zero_point: Var + a: _VarInfo + a_scale: _VarInfo + a_zero_point: _VarInfo + b: _VarInfo + b_scale: _VarInfo + b_zero_point: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QLinearMatMul", "", 10) @@ -2466,13 +2471,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - y_scale: Var - y_zero_point: Optional[Var] + x: _VarInfo + y_scale: _VarInfo + y_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QuantizeLinear", "", 13) @@ -2494,17 +2499,17 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - R: Var - B: Optional[Var] - sequence_lens: Optional[Var] - initial_h: Optional[Var] + X: _VarInfo + W: _VarInfo + R: _VarInfo + B: Optional[_VarInfo] + sequence_lens: Optional[_VarInfo] + initial_h: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Optional[Var] - Y_h: Optional[Var] + Y: Optional[_VarInfo] + Y_h: Optional[_VarInfo] op_type = OpType("RNN", "", 14) @@ -2526,7 
+2531,7 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomNormal", "", 1) @@ -2545,11 +2550,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomNormalLike", "", 1) @@ -2571,7 +2576,7 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomUniform", "", 1) @@ -2590,11 +2595,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomUniformLike", "", 1) @@ -2610,13 +2615,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - start: Var - limit: Var - delta: Var + start: _VarInfo + limit: _VarInfo + delta: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Range", "", 11) @@ -2632,11 +2637,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Reciprocal", "", 13) @@ -2653,11 +2658,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL1", "", 13) @@ -2674,11 +2679,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL2", "", 13) @@ -2695,11 +2700,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSum", "", 13) @@ -2716,11 +2721,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSumExp", "", 13) @@ -2737,11 +2742,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMax", "", 13) @@ -2758,11 +2763,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMean", "", 13) @@ -2779,11 +2784,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMin", "", 13) @@ -2800,11 +2805,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceProd", "", 13) @@ -2821,12 +2826,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceSum", "", 13) @@ -2843,11 +2848,11 @@ class 
Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceSumSquare", "", 13) @@ -2863,11 +2868,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Relu", "", 14) @@ -2883,12 +2888,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - shape: Var + data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - reshaped: Var + reshaped: _VarInfo op_type = OpType("Reshape", "", 14) @@ -2909,14 +2914,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - roi: Optional[Var] - scales: Optional[Var] - sizes: Optional[Var] + X: _VarInfo + roi: Optional[_VarInfo] + scales: Optional[_VarInfo] + sizes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Resize", "", 13) @@ -2933,12 +2938,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - sequence_lens: Var + input: _VarInfo + sequence_lens: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("ReverseSequence", "", 10) @@ -2959,13 +2964,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - rois: Var - batch_indices: Var + X: _VarInfo + rois: _VarInfo + batch_indices: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("RoiAlign", "", 16) @@ -2981,11 +2986,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Round", "", 11) @@ -3001,14 +3006,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - signal: Var - frame_step: Var - window: Optional[Var] - frame_length: Optional[Var] + signal: _VarInfo + frame_step: _VarInfo + window: Optional[_VarInfo] + frame_length: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("STFT", "", 17) @@ -3029,11 +3034,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - initial_state_and_scan_inputs: Sequence[Var] + initial_state_and_scan_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - final_state_and_scan_outputs: Sequence[Var] + final_state_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Scan", "", 16) @@ -3050,13 +3055,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterElements", "", 16) @@ -3072,13 +3077,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterND", "", 16) @@ -3095,11 +3100,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Selu", "", 6) @@ -3115,12 +3120,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - position: 
Var + input_sequence: _VarInfo + position: _VarInfo @dataclass class Outputs(BaseOutputs): - tensor: Var + tensor: _VarInfo op_type = OpType("SequenceAt", "", 11) @@ -3136,11 +3141,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SequenceConstruct", "", 11) @@ -3158,7 +3163,7 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("SequenceEmpty", "", 11) @@ -3174,12 +3179,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - position: Optional[Var] + input_sequence: _VarInfo + position: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SequenceErase", "", 11) @@ -3195,13 +3200,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - tensor: Var - position: Optional[Var] + input_sequence: _VarInfo + tensor: _VarInfo + position: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SequenceInsert", "", 11) @@ -3217,11 +3222,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var + input_sequence: _VarInfo @dataclass class Outputs(BaseOutputs): - length: Var + length: _VarInfo op_type = OpType("SequenceLength", "", 11) @@ -3237,12 +3242,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - additional_inputs: Sequence[Var] + input_sequence: _VarInfo + additional_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - out_sequence: Sequence[Var] + out_sequence: Sequence[_VarInfo] op_type = OpType("SequenceMap", "", 17) @@ -3259,11 +3264,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - shape: Var + shape: _VarInfo op_type = OpType("Shape", "", 15) @@ -3280,11 +3285,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Shrink", "", 9) @@ -3300,11 +3305,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Sigmoid", "", 13) @@ -3320,11 +3325,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Sign", "", 13) @@ -3340,11 +3345,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Sin", "", 7) @@ -3360,11 +3365,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Sinh", "", 9) @@ -3380,11 +3385,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - size: Var + size: _VarInfo op_type = OpType("Size", "", 13) @@ -3400,15 +3405,15 @@ 
class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - starts: Var - ends: Var - axes: Optional[Var] - steps: Optional[Var] + data: _VarInfo + starts: _VarInfo + ends: _VarInfo + axes: Optional[_VarInfo] + steps: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Slice", "", 13) @@ -3424,11 +3429,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Softmax", "", 13) @@ -3445,14 +3450,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - scores: Var - labels: Var - weights: Optional[Var] + scores: _VarInfo + labels: _VarInfo + weights: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var - log_prob: Optional[Var] + output: _VarInfo + log_prob: Optional[_VarInfo] op_type = OpType("SoftmaxCrossEntropyLoss", "", 13) @@ -3468,11 +3473,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Softplus", "", 1) @@ -3488,11 +3493,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Softsign", "", 1) @@ -3508,11 +3513,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("SpaceToDepth", "", 13) @@ -3528,12 +3533,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - split: Optional[Var] + input: _VarInfo + split: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Split", "", 13) @@ -3550,12 +3555,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - split: Optional[Var] + input: _VarInfo + split: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SplitToSequence", "", 11) @@ -3571,11 +3576,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Sqrt", "", 13) @@ -3591,12 +3596,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - squeezed: Var + squeezed: _VarInfo op_type = OpType("Squeeze", "", 13) @@ -3615,11 +3620,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("StringNormalizer", "", 10) @@ -3635,12 +3640,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Sub", "", 14) @@ -3656,11 +3661,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - sum: Var + sum: _VarInfo op_type = OpType("Sum", "", 13) @@ -3676,11 +3681,11 @@ class 
Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Tan", "", 7) @@ -3696,11 +3701,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Tanh", "", 13) @@ -3724,11 +3729,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("TfIdfVectorizer", "", 9) @@ -3744,11 +3749,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("ThresholdedRelu", "", 10) @@ -3764,12 +3769,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - repeats: Var + input: _VarInfo + repeats: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Tile", "", 13) @@ -3787,13 +3792,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - K: Var + X: _VarInfo + K: _VarInfo @dataclass class Outputs(BaseOutputs): - Values: Var - Indices: Var + Values: _VarInfo + Indices: _VarInfo op_type = OpType("TopK", "", 11) @@ -3809,11 +3814,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - transposed: Var + transposed: _VarInfo op_type = OpType("Transpose", "", 13) @@ -3829,12 +3834,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - k: Optional[Var] + input: _VarInfo + k: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Trilu", "", 14) @@ -3851,14 +3856,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - indices: Optional[Var] - inverse_indices: Optional[Var] - counts: Optional[Var] + Y: _VarInfo + indices: Optional[_VarInfo] + inverse_indices: Optional[_VarInfo] + counts: Optional[_VarInfo] op_type = OpType("Unique", "", 11) @@ -3874,12 +3879,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Var + data: _VarInfo + axes: _VarInfo @dataclass class Outputs(BaseOutputs): - expanded: Var + expanded: _VarInfo op_type = OpType("Unsqueeze", "", 13) @@ -3895,13 +3900,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - condition: Var - X: Var - Y: Var + condition: _VarInfo + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Where", "", 16) @@ -3917,12 +3922,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Xor", "", 7) @@ -3958,12 +3963,19 @@ def abs( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Abs( - _Abs.Attributes(), - _Abs.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Abs( + 
_Abs.Attributes(), + _Abs.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def acos( @@ -3992,12 +4004,19 @@ def acos( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Acos( - _Acos.Attributes(), - _Acos.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Acos( + _Acos.Attributes(), + _Acos.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def acosh( @@ -4027,12 +4046,19 @@ def acosh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Acosh( - _Acosh.Attributes(), - _Acosh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Acosh( + _Acosh.Attributes(), + _Acosh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def add( @@ -4072,13 +4098,21 @@ def add( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Add( - _Add.Attributes(), - _Add.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Add( + _Add.Attributes(), + _Add.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def and_( @@ -4117,13 +4151,21 @@ def and_( - T: `tensor(bool)` - T1: `tensor(bool)` """ - return _And( - _And.Attributes(), - _And.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _And( + _And.Attributes(), + _And.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def arg_max( @@ -4174,16 +4216,25 @@ def arg_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ArgMax( - _ArgMax.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - select_last_index=AttrInt64(select_last_index, name="select_last_index"), - ), - _ArgMax.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ArgMax( + _ArgMax.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + select_last_index=AttrInt64( + select_last_index, name="select_last_index" + ), + ), + _ArgMax.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def arg_min( @@ -4234,16 +4285,25 @@ def arg_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ArgMin( - _ArgMin.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - select_last_index=AttrInt64(select_last_index, name="select_last_index"), - ), - _ArgMin.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + 
data=data, + ) + return ( + _ArgMin( + _ArgMin.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + select_last_index=AttrInt64( + select_last_index, name="select_last_index" + ), + ), + _ArgMin.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def asin( @@ -4272,12 +4332,19 @@ def asin( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Asin( - _Asin.Attributes(), - _Asin.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Asin( + _Asin.Attributes(), + _Asin.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def asinh( @@ -4306,12 +4373,19 @@ def asinh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Asinh( - _Asinh.Attributes(), - _Asinh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Asinh( + _Asinh.Attributes(), + _Asinh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def atan( @@ -4340,12 +4414,19 @@ def atan( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Atan( - _Atan.Attributes(), - _Atan.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Atan( + _Atan.Attributes(), + _Atan.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def atanh( @@ -4375,12 +4456,19 @@ def atanh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Atanh( - _Atanh.Attributes(), - _Atanh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Atanh( + _Atanh.Attributes(), + _Atanh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def average_pool( @@ -4504,19 +4592,28 @@ def average_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _AveragePool( - _AveragePool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - count_include_pad=AttrInt64(count_include_pad, name="count_include_pad"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _AveragePool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _AveragePool( + _AveragePool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + count_include_pad=AttrInt64( + count_include_pad, name="count_include_pad" + ), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _AveragePool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def batch_normalization( @@ -4639,20 +4736,31 @@ def batch_normalization( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _BatchNormalization( - 
_BatchNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - momentum=AttrFloat32(momentum, name="momentum"), - training_mode=AttrInt64(training_mode, name="training_mode"), - ), - _BatchNormalization.Inputs( - X=X, - scale=scale, - B=B, - input_mean=input_mean, - input_var=input_var, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + scale=scale, + B=B, + input_mean=input_mean, + input_var=input_var, + ) + return ( + _BatchNormalization( + _BatchNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + momentum=AttrFloat32(momentum, name="momentum"), + training_mode=AttrInt64(training_mode, name="training_mode"), + ), + _BatchNormalization.Inputs( + X=unwrap_vars(X), + scale=unwrap_vars(scale), + B=unwrap_vars(B), + input_mean=unwrap_vars(input_mean), + input_var=unwrap_vars(input_var), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def bernoulli( @@ -4700,15 +4808,22 @@ def bernoulli( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Bernoulli( - _Bernoulli.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _Bernoulli.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Bernoulli( + _Bernoulli.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _Bernoulli.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def bit_shift( @@ -4761,15 +4876,23 @@ def bit_shift( Type constraints: - T: `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitShift( - _BitShift.Attributes( - direction=AttrString(direction, name="direction"), - ), - _BitShift.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + return ( + _BitShift( + _BitShift.Attributes( + direction=AttrString(direction, name="direction"), + ), + _BitShift.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) def blackman_window( @@ -4813,15 +4936,22 @@ def blackman_window( - T1: `tensor(int32)`, `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BlackmanWindow( - _BlackmanWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _BlackmanWindow.Inputs( - size=size, - ), - ).outputs.output + input_prop_values = create_prop_dict( + size=size, + ) + return ( + _BlackmanWindow( + _BlackmanWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), + _BlackmanWindow.Inputs( + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def cast( @@ -4906,14 +5036,21 @@ def cast( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, 
`tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Cast( - _Cast.Attributes( - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Cast( + _Cast.Attributes( + to=AttrDtype(to, name="to"), + ), + _Cast.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def cast_like( @@ -4950,13 +5087,21 @@ def cast_like( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _CastLike( - _CastLike.Attributes(), - _CastLike.Inputs( - input=input, - target_type=target_type, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + target_type=target_type, + ) + return ( + _CastLike( + _CastLike.Attributes(), + _CastLike.Inputs( + input=unwrap_vars(input), + target_type=unwrap_vars(target_type), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def ceil( @@ -4987,12 +5132,19 @@ def ceil( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Ceil( - _Ceil.Attributes(), - _Ceil.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Ceil( + _Ceil.Attributes(), + _Ceil.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def celu( @@ -5031,14 +5183,21 @@ def celu( Type constraints: - T: `tensor(float)` """ - return _Celu( - _Celu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _Celu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Celu( + _Celu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _Celu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def clip( @@ -5078,14 +5237,23 @@ def clip( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Clip( - _Clip.Attributes(), - _Clip.Inputs( - input=input, - min=min, - max=max, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + min=min, + max=max, + ) + return ( + _Clip( + _Clip.Attributes(), + _Clip.Inputs( + input=unwrap_vars(input), + min=unwrap_vars(min), + max=unwrap_vars(max), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def compress( @@ -5134,15 +5302,23 @@ def compress( - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, 
`tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Compress( - _Compress.Attributes( - axis=AttrInt64.maybe(axis, name="axis"), - ), - _Compress.Inputs( - input=input, - condition=condition, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + condition=condition, + ) + return ( + _Compress( + _Compress.Attributes( + axis=AttrInt64.maybe(axis, name="axis"), + ), + _Compress.Inputs( + input=unwrap_vars(input), + condition=unwrap_vars(condition), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def concat( @@ -5178,14 +5354,21 @@ def concat( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Concat( - _Concat.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Concat.Inputs( - inputs=inputs, - ), - ).outputs.concat_result + input_prop_values = create_prop_dict( + inputs=inputs, + ) + return ( + _Concat( + _Concat.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Concat.Inputs( + inputs=unwrap_vars(inputs), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .concat_result + ) def concat_from_sequence( @@ -5230,15 +5413,22 @@ def concat_from_sequence( - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConcatFromSequence( - _ConcatFromSequence.Attributes( - axis=AttrInt64(axis, name="axis"), - new_axis=AttrInt64(new_axis, name="new_axis"), - ), - _ConcatFromSequence.Inputs( - input_sequence=input_sequence, - ), - ).outputs.concat_result + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + ) + return ( + _ConcatFromSequence( + _ConcatFromSequence.Attributes( + axis=AttrInt64(axis, name="axis"), + new_axis=AttrInt64(new_axis, name="new_axis"), + ), + _ConcatFromSequence.Inputs( + input_sequence=unwrap_vars(input_sequence), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .concat_result + ) def constant( @@ -5296,18 +5486,23 @@ def constant( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - 
value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + return ( + _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), + _Constant.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def constant_of_shape( @@ -5347,14 +5542,21 @@ def constant_of_shape( - T1: `tensor(int64)` - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), + _ConstantOfShape.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def conv( @@ -5454,21 +5656,30 @@ def conv( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Conv( - _Conv.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _Conv.Inputs( - X=X, - W=W, - B=B, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + W=W, + B=B, + ) + return ( + _Conv( + _Conv.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _Conv.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def conv_integer( @@ -5579,22 +5790,32 @@ def conv_integer( - T2: `tensor(int8)`, `tensor(uint8)` - T3: `tensor(int32)` """ - return _ConvInteger( - _ConvInteger.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _ConvInteger.Inputs( - x=x, - w=w, - x_zero_point=x_zero_point, - w_zero_point=w_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + w=w, + x_zero_point=x_zero_point, + w_zero_point=w_zero_point, + ) + return ( + _ConvInteger( + _ConvInteger.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + 
dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _ConvInteger.Inputs( + x=unwrap_vars(x), + w=unwrap_vars(w), + x_zero_point=unwrap_vars(x_zero_point), + w_zero_point=unwrap_vars(w_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def conv_transpose( @@ -5725,23 +5946,32 @@ def conv_transpose( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _ConvTranspose( - _ConvTranspose.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - output_padding=AttrInt64s.maybe(output_padding, name="output_padding"), - output_shape=AttrInt64s.maybe(output_shape, name="output_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _ConvTranspose.Inputs( - X=X, - W=W, - B=B, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + W=W, + B=B, + ) + return ( + _ConvTranspose( + _ConvTranspose.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + output_padding=AttrInt64s.maybe(output_padding, name="output_padding"), + output_shape=AttrInt64s.maybe(output_shape, name="output_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _ConvTranspose.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def cos( @@ -5769,12 +5999,19 @@ def cos( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Cos( - _Cos.Attributes(), - _Cos.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Cos( + _Cos.Attributes(), + _Cos.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def cosh( @@ -5802,12 +6039,19 @@ def cosh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Cosh( - _Cosh.Attributes(), - _Cosh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Cosh( + _Cosh.Attributes(), + _Cosh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def cumsum( @@ -5875,16 +6119,24 @@ def cumsum( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _CumSum( - _CumSum.Attributes( - exclusive=AttrInt64(exclusive, name="exclusive"), - reverse=AttrInt64(reverse, name="reverse"), - ), - _CumSum.Inputs( - x=x, - axis=axis, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + axis=axis, + ) + return ( + _CumSum( + _CumSum.Attributes( + exclusive=AttrInt64(exclusive, name="exclusive"), + reverse=AttrInt64(reverse, name="reverse"), + ), + _CumSum.Inputs( + x=unwrap_vars(x), + axis=unwrap_vars(axis), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .y + ) def dft( @@ -5960,17 +6212,25 @@ def dft( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _DFT( - _DFT.Attributes( - axis=AttrInt64(axis, name="axis"), - inverse=AttrInt64(inverse, name="inverse"), - onesided=AttrInt64(onesided, name="onesided"), - ), - _DFT.Inputs( - input=input, - dft_length=dft_length, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + dft_length=dft_length, + ) + return ( + _DFT( + _DFT.Attributes( + axis=AttrInt64(axis, name="axis"), + inverse=AttrInt64(inverse, name="inverse"), + onesided=AttrInt64(onesided, name="onesided"), + ), + _DFT.Inputs( + input=unwrap_vars(input), + dft_length=unwrap_vars(dft_length), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def depth_to_space( @@ -6035,15 +6295,22 @@ def depth_to_space( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _DepthToSpace( - _DepthToSpace.Attributes( - blocksize=AttrInt64(blocksize, name="blocksize"), - mode=AttrString(mode, name="mode"), - ), - _DepthToSpace.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _DepthToSpace( + _DepthToSpace.Attributes( + blocksize=AttrInt64(blocksize, name="blocksize"), + mode=AttrString(mode, name="mode"), + ), + _DepthToSpace.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def dequantize_linear( @@ -6096,16 +6363,25 @@ def dequantize_linear( Type constraints: - T: `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` """ - return _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _DequantizeLinear.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + ) + return ( + _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _DequantizeLinear.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def det( @@ -6138,12 +6414,19 @@ def det( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Det( - _Det.Attributes(), - _Det.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Det( + _Det.Attributes(), + _Det.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def div( @@ -6183,13 +6466,21 @@ def div( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Div( - _Div.Attributes(), - _Div.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Div( + _Div.Attributes(), + _Div.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .C + ) def dropout( @@ -6268,16 +6559,25 @@ def dropout( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bool)` """ - return _Dropout( - _Dropout.Attributes( - seed=AttrInt64.maybe(seed, name="seed"), - ), - _Dropout.Inputs( - data=data, - ratio=ratio, - training_mode=training_mode, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + data=data, + ratio=ratio, + training_mode=training_mode, + ) + return ( + _Dropout( + _Dropout.Attributes( + seed=AttrInt64.maybe(seed, name="seed"), + ), + _Dropout.Inputs( + data=unwrap_vars(data), + ratio=unwrap_vars(ratio), + training_mode=unwrap_vars(training_mode), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def dynamic_quantize_linear( @@ -6347,12 +6647,19 @@ def dynamic_quantize_linear( - T1: `tensor(float)` - T2: `tensor(uint8)` """ - return _DynamicQuantizeLinear( - _DynamicQuantizeLinear.Attributes(), - _DynamicQuantizeLinear.Inputs( - x=x, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + x=x, + ) + return ( + _DynamicQuantizeLinear( + _DynamicQuantizeLinear.Attributes(), + _DynamicQuantizeLinear.Inputs( + x=unwrap_vars(x), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def einsum( @@ -6417,14 +6724,21 @@ def einsum( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Einsum( - _Einsum.Attributes( - equation=AttrString(equation, name="equation"), - ), - _Einsum.Inputs( - Inputs=Inputs, - ), - ).outputs.Output + input_prop_values = create_prop_dict( + Inputs=Inputs, + ) + return ( + _Einsum( + _Einsum.Attributes( + equation=AttrString(equation, name="equation"), + ), + _Einsum.Inputs( + Inputs=unwrap_vars(Inputs), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Output + ) def elu( @@ -6460,14 +6774,21 @@ def elu( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Elu( - _Elu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _Elu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Elu( + _Elu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _Elu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def equal( @@ -6506,13 +6827,21 @@ def equal( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Equal( - _Equal.Attributes(), - _Equal.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Equal( + _Equal.Attributes(), + _Equal.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def erf( @@ -6541,12 +6870,19 @@ def erf( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Erf( - _Erf.Attributes(), - _Erf.Inputs( - input=input, - ), - ).outputs.output + 
input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Erf( + _Erf.Attributes(), + _Erf.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def exp( @@ -6574,12 +6910,19 @@ def exp( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Exp( - _Exp.Attributes(), - _Exp.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Exp( + _Exp.Attributes(), + _Exp.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def expand( @@ -6620,13 +6963,21 @@ def expand( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Expand( - _Expand.Attributes(), - _Expand.Inputs( - input=input, - shape=shape, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + shape=shape, + ) + return ( + _Expand( + _Expand.Attributes(), + _Expand.Inputs( + input=unwrap_vars(input), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def eye_like( @@ -6677,15 +7028,22 @@ def eye_like( - T1: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _EyeLike( - _EyeLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - k=AttrInt64(k, name="k"), - ), - _EyeLike.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _EyeLike( + _EyeLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + k=AttrInt64(k, name="k"), + ), + _EyeLike.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def flatten( @@ -6727,14 +7085,21 @@ def flatten( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Flatten( - _Flatten.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Flatten.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Flatten( + _Flatten.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Flatten.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def floor( @@ -6765,12 +7130,19 @@ def floor( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Floor( - _Floor.Attributes(), - _Floor.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Floor( + _Floor.Attributes(), + _Floor.Inputs( + X=unwrap_vars(X), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .Y + ) def gru( @@ -6945,30 +7317,44 @@ def gru( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T1: `tensor(int32)` """ - return _GRU( - _GRU.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), - activations=AttrStrings.maybe(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - layout=AttrInt64(layout, name="layout"), - linear_before_reset=AttrInt64( - linear_before_reset, name="linear_before_reset" - ), - ), - _GRU.Inputs( - X=X, - W=W, - R=R, - B=B, - sequence_lens=sequence_lens, - initial_h=initial_h, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + W=W, + R=R, + B=B, + sequence_lens=sequence_lens, + initial_h=initial_h, + ) + return ( + _GRU( + _GRU.Attributes( + activation_alpha=AttrFloat32s.maybe( + activation_alpha, name="activation_alpha" + ), + activation_beta=AttrFloat32s.maybe( + activation_beta, name="activation_beta" + ), + activations=AttrStrings.maybe(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + layout=AttrInt64(layout, name="layout"), + linear_before_reset=AttrInt64( + linear_before_reset, name="linear_before_reset" + ), + ), + _GRU.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + R=unwrap_vars(R), + B=unwrap_vars(B), + sequence_lens=unwrap_vars(sequence_lens), + initial_h=unwrap_vars(initial_h), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def gather( @@ -7057,15 +7443,23 @@ def gather( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Gather( - _Gather.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Gather.Inputs( - data=data, - indices=indices, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + ) + return ( + _Gather( + _Gather.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Gather.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def gather_elements( @@ -7162,15 +7556,23 @@ def gather_elements( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _GatherElements( - _GatherElements.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _GatherElements.Inputs( - data=data, - indices=indices, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + ) + return ( + _GatherElements( + _GatherElements.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _GatherElements.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), 
+ ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def gather_nd( @@ -7312,15 +7714,23 @@ def gather_nd( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _GatherND( - _GatherND.Attributes( - batch_dims=AttrInt64(batch_dims, name="batch_dims"), - ), - _GatherND.Inputs( - data=data, - indices=indices, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + ) + return ( + _GatherND( + _GatherND.Attributes( + batch_dims=AttrInt64(batch_dims, name="batch_dims"), + ), + _GatherND.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def gemm( @@ -7396,19 +7806,28 @@ def gemm( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _Gemm( - _Gemm.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - transA=AttrInt64(transA, name="transA"), - transB=AttrInt64(transB, name="transB"), - ), - _Gemm.Inputs( - A=A, - B=B, - C=C, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + A=A, + B=B, + C=C, + ) + return ( + _Gemm( + _Gemm.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + transA=AttrInt64(transA, name="transA"), + transB=AttrInt64(transB, name="transB"), + ), + _Gemm.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + C=unwrap_vars(C), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def global_average_pool( @@ -7445,12 +7864,19 @@ def global_average_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GlobalAveragePool( - _GlobalAveragePool.Attributes(), - _GlobalAveragePool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _GlobalAveragePool( + _GlobalAveragePool.Attributes(), + _GlobalAveragePool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def global_lp_pool( @@ -7492,14 +7918,21 @@ def global_lp_pool( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GlobalLpPool( - _GlobalLpPool.Attributes( - p=AttrInt64(p, name="p"), - ), - _GlobalLpPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _GlobalLpPool( + _GlobalLpPool.Attributes( + p=AttrInt64(p, name="p"), + ), + _GlobalLpPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def global_max_pool( @@ -7536,12 +7969,19 @@ def global_max_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GlobalMaxPool( - _GlobalMaxPool.Attributes(), - _GlobalMaxPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _GlobalMaxPool( + _GlobalMaxPool.Attributes(), + _GlobalMaxPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def greater( @@ -7580,14 +8020,22 @@ def greater( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, 
`tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Greater( - _Greater.Attributes(), - _Greater.Inputs( - A=A, - B=B, - ), - ).outputs.C - + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Greater( + _Greater.Attributes(), + _Greater.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + def greater_or_equal( A: Var, @@ -7625,13 +8073,21 @@ def greater_or_equal( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _GreaterOrEqual( - _GreaterOrEqual.Attributes(), - _GreaterOrEqual.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _GreaterOrEqual( + _GreaterOrEqual.Attributes(), + _GreaterOrEqual.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def grid_sample( @@ -7716,17 +8172,25 @@ def grid_sample( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GridSample( - _GridSample.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - mode=AttrString(mode, name="mode"), - padding_mode=AttrString(padding_mode, name="padding_mode"), - ), - _GridSample.Inputs( - X=X, - grid=grid, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + grid=grid, + ) + return ( + _GridSample( + _GridSample.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + mode=AttrString(mode, name="mode"), + padding_mode=AttrString(padding_mode, name="padding_mode"), + ), + _GridSample.Inputs( + X=unwrap_vars(X), + grid=unwrap_vars(grid), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def hamming_window( @@ -7770,15 +8234,22 @@ def hamming_window( - T1: `tensor(int32)`, `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _HammingWindow( - _HammingWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _HammingWindow.Inputs( - size=size, - ), - ).outputs.output + input_prop_values = create_prop_dict( + size=size, + ) + return ( + _HammingWindow( + _HammingWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), + _HammingWindow.Inputs( + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def hann_window( @@ -7822,15 +8293,22 @@ def hann_window( - T1: `tensor(int32)`, `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, 
`tensor(uint8)` """ - return _HannWindow( - _HannWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _HannWindow.Inputs( - size=size, - ), - ).outputs.output + input_prop_values = create_prop_dict( + size=size, + ) + return ( + _HannWindow( + _HannWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), + _HannWindow.Inputs( + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def hard_sigmoid( @@ -7869,15 +8347,22 @@ def hard_sigmoid( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _HardSigmoid( - _HardSigmoid.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - ), - _HardSigmoid.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _HardSigmoid( + _HardSigmoid.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + ), + _HardSigmoid.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def hard_swish( @@ -7908,12 +8393,19 @@ def hard_swish( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _HardSwish( - _HardSwish.Attributes(), - _HardSwish.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _HardSwish( + _HardSwish.Attributes(), + _HardSwish.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def hardmax( @@ -7955,14 +8447,21 @@ def hardmax( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Hardmax( - _Hardmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Hardmax.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Hardmax( + _Hardmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Hardmax.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def identity( @@ -7990,12 +8489,19 @@ def identity( Type constraints: - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, 
`seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Identity( + _Identity.Attributes(), + _Identity.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def if_( @@ -8052,16 +8558,23 @@ def if_( """ _else_branch_subgraph: Graph = subgraph((), else_branch) _then_branch_subgraph: Graph = subgraph((), then_branch) - return _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=cond, - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ).outputs.outputs + input_prop_values = create_prop_dict( + cond=cond, + ) + return ( + _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), + _If.Inputs( + cond=unwrap_vars(cond), + ), + out_variadic=len(_else_branch_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) def instance_normalization( @@ -8110,16 +8623,25 @@ def instance_normalization( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _InstanceNormalization( - _InstanceNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - ), - _InstanceNormalization.Inputs( - input=input, - scale=scale, - B=B, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + scale=scale, + B=B, + ) + return ( + _InstanceNormalization( + _InstanceNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + ), + _InstanceNormalization.Inputs( + input=unwrap_vars(input), + scale=unwrap_vars(scale), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def isinf( @@ -8161,15 +8683,22 @@ def isinf( - T1: `tensor(double)`, `tensor(float)` - T2: `tensor(bool)` """ - return _IsInf( - _IsInf.Attributes( - detect_negative=AttrInt64(detect_negative, name="detect_negative"), - detect_positive=AttrInt64(detect_positive, name="detect_positive"), - ), - _IsInf.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _IsInf( + _IsInf.Attributes( + detect_negative=AttrInt64(detect_negative, name="detect_negative"), + detect_positive=AttrInt64(detect_positive, name="detect_positive"), + ), + _IsInf.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def isnan( @@ -8198,12 +8727,19 @@ def isnan( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bool)` """ - return _IsNaN( - _IsNaN.Attributes(), - _IsNaN.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _IsNaN( + _IsNaN.Attributes(), + _IsNaN.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def lrn( @@ -8265,17 +8801,24 @@ def lrn( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, 
`tensor(float)`, `tensor(float16)` """ - return _LRN( - _LRN.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - bias=AttrFloat32(bias, name="bias"), - size=AttrInt64(size, name="size"), - ), - _LRN.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LRN( + _LRN.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + bias=AttrFloat32(bias, name="bias"), + size=AttrInt64(size, name="size"), + ), + _LRN.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def lstm( @@ -8472,30 +9015,46 @@ def lstm( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T1: `tensor(int32)` """ - return _LSTM( - _LSTM.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), - activations=AttrStrings.maybe(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - input_forget=AttrInt64(input_forget, name="input_forget"), - layout=AttrInt64(layout, name="layout"), - ), - _LSTM.Inputs( - X=X, - W=W, - R=R, - B=B, - sequence_lens=sequence_lens, - initial_h=initial_h, - initial_c=initial_c, - P=P, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + W=W, + R=R, + B=B, + sequence_lens=sequence_lens, + initial_h=initial_h, + initial_c=initial_c, + P=P, + ) + return ( + _LSTM( + _LSTM.Attributes( + activation_alpha=AttrFloat32s.maybe( + activation_alpha, name="activation_alpha" + ), + activation_beta=AttrFloat32s.maybe( + activation_beta, name="activation_beta" + ), + activations=AttrStrings.maybe(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + input_forget=AttrInt64(input_forget, name="input_forget"), + layout=AttrInt64(layout, name="layout"), + ), + _LSTM.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + R=unwrap_vars(R), + B=unwrap_vars(B), + sequence_lens=unwrap_vars(sequence_lens), + initial_h=unwrap_vars(initial_h), + initial_c=unwrap_vars(initial_c), + P=unwrap_vars(P), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def layer_normalization( @@ -8581,18 +9140,27 @@ def layer_normalization( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - U: `tensor(bfloat16)`, `tensor(float)` """ - return _LayerNormalization( - _LayerNormalization.Attributes( - axis=AttrInt64(axis, name="axis"), - epsilon=AttrFloat32(epsilon, name="epsilon"), - stash_type=AttrInt64(stash_type, name="stash_type"), - ), - _LayerNormalization.Inputs( - X=X, - Scale=Scale, - B=B, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + Scale=Scale, + B=B, + ) + return ( + _LayerNormalization( + _LayerNormalization.Attributes( + axis=AttrInt64(axis, name="axis"), + epsilon=AttrFloat32(epsilon, name="epsilon"), + stash_type=AttrInt64(stash_type, name="stash_type"), + ), + _LayerNormalization.Inputs( + X=unwrap_vars(X), + Scale=unwrap_vars(Scale), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def leaky_relu( @@ -8628,14 +9196,21 @@ def leaky_relu( Type constraints: - T: 
`tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LeakyRelu( - _LeakyRelu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _LeakyRelu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LeakyRelu( + _LeakyRelu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _LeakyRelu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def less( @@ -8674,13 +9249,21 @@ def less( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Less( - _Less.Attributes(), - _Less.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Less( + _Less.Attributes(), + _Less.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def less_or_equal( @@ -8719,13 +9302,21 @@ def less_or_equal( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _LessOrEqual( - _LessOrEqual.Attributes(), - _LessOrEqual.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _LessOrEqual( + _LessOrEqual.Attributes(), + _LessOrEqual.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def log( @@ -8753,12 +9344,19 @@ def log( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Log( - _Log.Attributes(), - _Log.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Log( + _Log.Attributes(), + _Log.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def log_softmax( @@ -8799,14 +9397,21 @@ def log_softmax( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LogSoftmax( - _LogSoftmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _LogSoftmax.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _LogSoftmax( + _LogSoftmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _LogSoftmax.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def loop( @@ -8990,17 +9595,26 @@ def loop( + [var.unwrap_type() for var in v_initial], body, ) - return _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=M, - cond=cond, - v_initial=v_initial, - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ).outputs.v_final_and_scan_outputs + input_prop_values = create_prop_dict( + M=M, + cond=cond, + v_initial=v_initial, + ) + return ( + _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _Loop.Inputs( + M=unwrap_vars(M), + cond=unwrap_vars(cond), + v_initial=unwrap_vars(v_initial), + ), + out_variadic=len(_body_subgraph.requested_results) - 1, + ) + .get_output_vars(input_prop_values=input_prop_values) + 
.v_final_and_scan_outputs + ) def lp_normalization( @@ -9037,15 +9651,22 @@ def lp_normalization( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LpNormalization( - _LpNormalization.Attributes( - axis=AttrInt64(axis, name="axis"), - p=AttrInt64(p, name="p"), - ), - _LpNormalization.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _LpNormalization( + _LpNormalization.Attributes( + axis=AttrInt64(axis, name="axis"), + p=AttrInt64(p, name="p"), + ), + _LpNormalization.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def lp_pool( @@ -9119,18 +9740,25 @@ def lp_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LpPool( - _LpPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - p=AttrInt64(p, name="p"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _LpPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LpPool( + _LpPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + p=AttrInt64(p, name="p"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _LpPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def matmul( @@ -9163,13 +9791,21 @@ def matmul( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _MatMul( - _MatMul.Attributes(), - _MatMul.Inputs( - A=A, - B=B, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _MatMul( + _MatMul.Attributes(), + _MatMul.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def matmul_integer( @@ -9224,15 +9860,25 @@ def matmul_integer( - T2: `tensor(int8)`, `tensor(uint8)` - T3: `tensor(int32)` """ - return _MatMulInteger( - _MatMulInteger.Attributes(), - _MatMulInteger.Inputs( - A=A, - B=B, - a_zero_point=a_zero_point, - b_zero_point=b_zero_point, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + A=A, + B=B, + a_zero_point=a_zero_point, + b_zero_point=b_zero_point, + ) + return ( + _MatMulInteger( + _MatMulInteger.Attributes(), + _MatMulInteger.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + a_zero_point=unwrap_vars(a_zero_point), + b_zero_point=unwrap_vars(b_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def max( @@ -9264,12 +9910,19 @@ def max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Max( - _Max.Attributes(), - _Max.Inputs( - data_0=data_0, - ), - ).outputs.max + input_prop_values = create_prop_dict( + data_0=data_0, + ) + return ( + _Max( + _Max.Attributes(), + _Max.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .max + ) def max_pool( @@ -9408,20 +10061,27 @@ def max_pool( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, 
`tensor(int8)`, `tensor(uint8)` - I: `tensor(int64)` """ - return _MaxPool( - _MaxPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - storage_order=AttrInt64(storage_order, name="storage_order"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _MaxPool.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _MaxPool( + _MaxPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + storage_order=AttrInt64(storage_order, name="storage_order"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _MaxPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def max_roi_pool( @@ -9469,16 +10129,24 @@ def max_roi_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _MaxRoiPool( - _MaxRoiPool.Attributes( - pooled_shape=AttrInt64s(pooled_shape, name="pooled_shape"), - spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), - ), - _MaxRoiPool.Inputs( - X=X, - rois=rois, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + rois=rois, + ) + return ( + _MaxRoiPool( + _MaxRoiPool.Attributes( + pooled_shape=AttrInt64s(pooled_shape, name="pooled_shape"), + spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), + ), + _MaxRoiPool.Inputs( + X=unwrap_vars(X), + rois=unwrap_vars(rois), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def max_unpool( @@ -9577,18 +10245,27 @@ def max_unpool( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int64)` """ - return _MaxUnpool( - _MaxUnpool.Attributes( - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _MaxUnpool.Inputs( - X=X, - I=I, - output_shape=output_shape, - ), - ).outputs.output + input_prop_values = create_prop_dict( + X=X, + I=I, + output_shape=output_shape, + ) + return ( + _MaxUnpool( + _MaxUnpool.Attributes( + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _MaxUnpool.Inputs( + X=unwrap_vars(X), + I=unwrap_vars(I), + output_shape=unwrap_vars(output_shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def mean( @@ -9620,12 +10297,19 @@ def mean( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Mean( - _Mean.Attributes(), - _Mean.Inputs( - data_0=data_0, - ), - ).outputs.mean + input_prop_values = create_prop_dict( + data_0=data_0, + ) + return ( + _Mean( + _Mean.Attributes(), + _Mean.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .mean + ) def mean_variance_normalization( @@ -9663,14 +10347,21 @@ def mean_variance_normalization( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _MeanVarianceNormalization( - 
_MeanVarianceNormalization.Attributes( - axes=AttrInt64s(axes, name="axes"), - ), - _MeanVarianceNormalization.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _MeanVarianceNormalization( + _MeanVarianceNormalization.Attributes( + axes=AttrInt64s(axes, name="axes"), + ), + _MeanVarianceNormalization.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def mel_weight_matrix( @@ -9747,18 +10438,29 @@ def mel_weight_matrix( - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T3: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _MelWeightMatrix( - _MelWeightMatrix.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - ), - _MelWeightMatrix.Inputs( - num_mel_bins=num_mel_bins, - dft_length=dft_length, - sample_rate=sample_rate, - lower_edge_hertz=lower_edge_hertz, - upper_edge_hertz=upper_edge_hertz, - ), - ).outputs.output + input_prop_values = create_prop_dict( + num_mel_bins=num_mel_bins, + dft_length=dft_length, + sample_rate=sample_rate, + lower_edge_hertz=lower_edge_hertz, + upper_edge_hertz=upper_edge_hertz, + ) + return ( + _MelWeightMatrix( + _MelWeightMatrix.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + ), + _MelWeightMatrix.Inputs( + num_mel_bins=unwrap_vars(num_mel_bins), + dft_length=unwrap_vars(dft_length), + sample_rate=unwrap_vars(sample_rate), + lower_edge_hertz=unwrap_vars(lower_edge_hertz), + upper_edge_hertz=unwrap_vars(upper_edge_hertz), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def min( @@ -9790,12 +10492,19 @@ def min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Min( - _Min.Attributes(), - _Min.Inputs( - data_0=data_0, - ), - ).outputs.min + input_prop_values = create_prop_dict( + data_0=data_0, + ) + return ( + _Min( + _Min.Attributes(), + _Min.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .min + ) def mod( @@ -9850,15 +10559,23 @@ def mod( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Mod( - _Mod.Attributes( - fmod=AttrInt64(fmod, name="fmod"), - ), - _Mod.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Mod( + _Mod.Attributes( + fmod=AttrInt64(fmod, name="fmod"), + ), + _Mod.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def mul( @@ -9898,13 +10615,21 @@ def mul( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Mul( - _Mul.Attributes(), - _Mul.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Mul( + _Mul.Attributes(), + 
_Mul.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def multinomial( @@ -9954,16 +10679,23 @@ def multinomial( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _Multinomial( - _Multinomial.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - sample_size=AttrInt64(sample_size, name="sample_size"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _Multinomial.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Multinomial( + _Multinomial.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + sample_size=AttrInt64(sample_size, name="sample_size"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _Multinomial.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def neg( @@ -9993,12 +10725,19 @@ def neg( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` """ - return _Neg( - _Neg.Attributes(), - _Neg.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Neg( + _Neg.Attributes(), + _Neg.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def negative_log_likelihood_loss( @@ -10158,17 +10897,26 @@ def negative_log_likelihood_loss( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _NegativeLogLikelihoodLoss( - _NegativeLogLikelihoodLoss.Attributes( - ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), - reduction=AttrString(reduction, name="reduction"), - ), - _NegativeLogLikelihoodLoss.Inputs( - input=input, - target=target, - weight=weight, - ), - ).outputs.loss + input_prop_values = create_prop_dict( + input=input, + target=target, + weight=weight, + ) + return ( + _NegativeLogLikelihoodLoss( + _NegativeLogLikelihoodLoss.Attributes( + ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), + reduction=AttrString(reduction, name="reduction"), + ), + _NegativeLogLikelihoodLoss.Inputs( + input=unwrap_vars(input), + target=unwrap_vars(target), + weight=unwrap_vars(weight), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .loss + ) def non_max_suppression( @@ -10237,18 +10985,29 @@ def non_max_suppression( Signature: ``ai.onnx@11::NonMaxSuppression``. 
""" - return _NonMaxSuppression( - _NonMaxSuppression.Attributes( - center_point_box=AttrInt64(center_point_box, name="center_point_box"), - ), - _NonMaxSuppression.Inputs( - boxes=boxes, - scores=scores, - max_output_boxes_per_class=max_output_boxes_per_class, - iou_threshold=iou_threshold, - score_threshold=score_threshold, - ), - ).outputs.selected_indices + input_prop_values = create_prop_dict( + boxes=boxes, + scores=scores, + max_output_boxes_per_class=max_output_boxes_per_class, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + return ( + _NonMaxSuppression( + _NonMaxSuppression.Attributes( + center_point_box=AttrInt64(center_point_box, name="center_point_box"), + ), + _NonMaxSuppression.Inputs( + boxes=unwrap_vars(boxes), + scores=unwrap_vars(scores), + max_output_boxes_per_class=unwrap_vars(max_output_boxes_per_class), + iou_threshold=unwrap_vars(iou_threshold), + score_threshold=unwrap_vars(score_threshold), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .selected_indices + ) def non_zero( @@ -10280,12 +11039,19 @@ def non_zero( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _NonZero( - _NonZero.Attributes(), - _NonZero.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _NonZero( + _NonZero.Attributes(), + _NonZero.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def not_( @@ -10313,12 +11079,19 @@ def not_( Type constraints: - T: `tensor(bool)` """ - return _Not( - _Not.Attributes(), - _Not.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Not( + _Not.Attributes(), + _Not.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def one_hot( @@ -10402,16 +11175,25 @@ def one_hot( - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T3: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _OneHot( - _OneHot.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _OneHot.Inputs( - indices=indices, - depth=depth, - values=values, - ), - ).outputs.output + input_prop_values = create_prop_dict( + indices=indices, + depth=depth, + values=values, + ) + return ( + _OneHot( + _OneHot.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _OneHot.Inputs( + indices=unwrap_vars(indices), + depth=unwrap_vars(depth), + values=unwrap_vars(values), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def optional( @@ -10447,14 +11229,21 @@ def optional( - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, 
`tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` """ - return _Optional( - _Optional.Attributes( - type=AttrType.maybe(type, name="type"), - ), - _Optional.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Optional( + _Optional.Attributes( + type=AttrType.maybe(type, name="type"), + ), + _Optional.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def optional_get_element( @@ -10485,12 +11274,19 @@ def optional_get_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _OptionalGetElement( - _OptionalGetElement.Attributes(), - _OptionalGetElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _OptionalGetElement( + _OptionalGetElement.Attributes(), + _OptionalGetElement.Inputs( + 
input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def optional_has_element( @@ -10521,12 +11317,19 @@ def optional_has_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - B: `tensor(bool)` """ - return _OptionalHasElement( - _OptionalHasElement.Attributes(), - _OptionalHasElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _OptionalHasElement( + _OptionalHasElement.Attributes(), + _OptionalHasElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def or_( @@ -10565,13 +11368,21 @@ def or_( - T: `tensor(bool)` - T1: `tensor(bool)` """ - return _Or( - _Or.Attributes(), - _Or.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Or( + _Or.Attributes(), + _Or.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def prelu( @@ -10610,13 +11421,21 @@ def prelu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _PRelu( - _PRelu.Attributes(), - _PRelu.Inputs( - X=X, - slope=slope, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + slope=slope, + ) + return ( + _PRelu( + _PRelu.Attributes(), + _PRelu.Inputs( + X=unwrap_vars(X), + slope=unwrap_vars(slope), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def pad( @@ -10713,16 +11532,25 @@ def pad( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + ) + return ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def pow( @@ -10760,13 +11588,21 @@ def pow( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)` - 
T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Pow( - _Pow.Attributes(), - _Pow.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + return ( + _Pow( + _Pow.Attributes(), + _Pow.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) def qlinear_conv( @@ -10909,27 +11745,42 @@ def qlinear_conv( - T3: `tensor(int8)`, `tensor(uint8)` - T4: `tensor(int32)` """ - return _QLinearConv( - _QLinearConv.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _QLinearConv.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - w=w, - w_scale=w_scale, - w_zero_point=w_zero_point, - y_scale=y_scale, - y_zero_point=y_zero_point, - B=B, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + w=w, + w_scale=w_scale, + w_zero_point=w_zero_point, + y_scale=y_scale, + y_zero_point=y_zero_point, + B=B, + ) + return ( + _QLinearConv( + _QLinearConv.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _QLinearConv.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + w=unwrap_vars(w), + w_scale=unwrap_vars(w_scale), + w_zero_point=unwrap_vars(w_zero_point), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def qlinear_matmul( @@ -11004,19 +11855,33 @@ def qlinear_matmul( - T2: `tensor(int8)`, `tensor(uint8)` - T3: `tensor(int8)`, `tensor(uint8)` """ - return _QLinearMatMul( - _QLinearMatMul.Attributes(), - _QLinearMatMul.Inputs( - a=a, - a_scale=a_scale, - a_zero_point=a_zero_point, - b=b, - b_scale=b_scale, - b_zero_point=b_zero_point, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + a=a, + a_scale=a_scale, + a_zero_point=a_zero_point, + b=b, + b_scale=b_scale, + b_zero_point=b_zero_point, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + return ( + _QLinearMatMul( + _QLinearMatMul.Attributes(), + _QLinearMatMul.Inputs( + a=unwrap_vars(a), + a_scale=unwrap_vars(a_scale), + a_zero_point=unwrap_vars(a_zero_point), + b=unwrap_vars(b), + b_scale=unwrap_vars(b_scale), + b_zero_point=unwrap_vars(b_zero_point), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def quantize_linear( @@ -11073,16 +11938,25 @@ def quantize_linear( - T1: `tensor(float)`, `tensor(int32)` - T2: `tensor(int8)`, `tensor(uint8)` """ - return _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _QuantizeLinear.Inputs( - x=x, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - 
).outputs.y + input_prop_values = create_prop_dict( + x=x, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + return ( + _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _QuantizeLinear.Inputs( + x=unwrap_vars(x), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def rnn( @@ -11237,27 +12111,41 @@ def rnn( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T1: `tensor(int32)` """ - return _RNN( - _RNN.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), - activations=AttrStrings(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - layout=AttrInt64(layout, name="layout"), - ), - _RNN.Inputs( - X=X, - W=W, - R=R, - B=B, - sequence_lens=sequence_lens, - initial_h=initial_h, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + W=W, + R=R, + B=B, + sequence_lens=sequence_lens, + initial_h=initial_h, + ) + return ( + _RNN( + _RNN.Attributes( + activation_alpha=AttrFloat32s.maybe( + activation_alpha, name="activation_alpha" + ), + activation_beta=AttrFloat32s.maybe( + activation_beta, name="activation_beta" + ), + activations=AttrStrings(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + layout=AttrInt64(layout, name="layout"), + ), + _RNN.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + R=unwrap_vars(R), + B=unwrap_vars(B), + sequence_lens=unwrap_vars(sequence_lens), + initial_h=unwrap_vars(initial_h), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def random_normal( @@ -11311,16 +12199,21 @@ def random_normal( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomNormal( - _RandomNormal.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - mean=AttrFloat32(mean, name="mean"), - scale=AttrFloat32(scale, name="scale"), - seed=AttrFloat32.maybe(seed, name="seed"), - shape=AttrInt64s(shape, name="shape"), - ), - _RandomNormal.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + return ( + _RandomNormal( + _RandomNormal.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + mean=AttrFloat32(mean, name="mean"), + scale=AttrFloat32(scale, name="scale"), + seed=AttrFloat32.maybe(seed, name="seed"), + shape=AttrInt64s(shape, name="shape"), + ), + _RandomNormal.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def random_normal_like( @@ -11376,17 +12269,24 @@ def random_normal_like( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomNormalLike( - _RandomNormalLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - mean=AttrFloat32(mean, name="mean"), - scale=AttrFloat32(scale, name="scale"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _RandomNormalLike.Inputs( - 
input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _RandomNormalLike( + _RandomNormalLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + mean=AttrFloat32(mean, name="mean"), + scale=AttrFloat32(scale, name="scale"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _RandomNormalLike.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def random_uniform( @@ -11439,16 +12339,21 @@ def random_uniform( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomUniform( - _RandomUniform.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - high=AttrFloat32(high, name="high"), - low=AttrFloat32(low, name="low"), - seed=AttrFloat32.maybe(seed, name="seed"), - shape=AttrInt64s(shape, name="shape"), - ), - _RandomUniform.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + return ( + _RandomUniform( + _RandomUniform.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + high=AttrFloat32(high, name="high"), + low=AttrFloat32(low, name="low"), + seed=AttrFloat32.maybe(seed, name="seed"), + shape=AttrInt64s(shape, name="shape"), + ), + _RandomUniform.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def random_uniform_like( @@ -11504,17 +12409,24 @@ def random_uniform_like( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomUniformLike( - _RandomUniformLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - high=AttrFloat32(high, name="high"), - low=AttrFloat32(low, name="low"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _RandomUniformLike.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _RandomUniformLike( + _RandomUniformLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + high=AttrFloat32(high, name="high"), + low=AttrFloat32(low, name="low"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _RandomUniformLike.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def range( @@ -11581,14 +12493,23 @@ def range( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)` """ - return _Range( - _Range.Attributes(), - _Range.Inputs( - start=start, - limit=limit, - delta=delta, - ), - ).outputs.output + input_prop_values = create_prop_dict( + start=start, + limit=limit, + delta=delta, + ) + return ( + _Range( + _Range.Attributes(), + _Range.Inputs( + start=unwrap_vars(start), + limit=unwrap_vars(limit), + delta=unwrap_vars(delta), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def reciprocal( @@ -11618,12 +12539,19 @@ def reciprocal( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Reciprocal( - _Reciprocal.Attributes(), - _Reciprocal.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Reciprocal( + _Reciprocal.Attributes(), + _Reciprocal.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) 
def reduce_l1( @@ -11670,15 +12598,22 @@ def reduce_l1( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL1( - _ReduceL1.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceL1.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceL1( + _ReduceL1.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceL1.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_l2( @@ -11725,15 +12660,22 @@ def reduce_l2( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL2( - _ReduceL2.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceL2.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceL2( + _ReduceL2.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceL2.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_log_sum( @@ -11781,15 +12723,22 @@ def reduce_log_sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSum( - _ReduceLogSum.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceLogSum.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceLogSum( + _ReduceLogSum.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceLogSum.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_log_sum_exp( @@ -11837,15 +12786,22 @@ def reduce_log_sum_exp( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSumExp( - _ReduceLogSumExp.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceLogSumExp.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceLogSumExp( + _ReduceLogSumExp.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceLogSumExp.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_max( @@ -11894,15 +12850,22 @@ def reduce_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMax( - _ReduceMax.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMax.Inputs( - data=data, - 
), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceMax( + _ReduceMax.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceMax.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_mean( @@ -11949,15 +12912,22 @@ def reduce_mean( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceMean( - _ReduceMean.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMean.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceMean( + _ReduceMean.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceMean.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_min( @@ -12005,15 +12975,22 @@ def reduce_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMin( - _ReduceMin.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMin.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceMin( + _ReduceMin.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceMin.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_prod( @@ -12060,15 +13037,22 @@ def reduce_prod( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceProd( - _ReduceProd.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceProd.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceProd( + _ReduceProd.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceProd.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_sum( @@ -12124,18 +13108,26 @@ def reduce_sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceSum( - _ReduceSum.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceSum( + _ReduceSum.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceSum.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceSum.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + 
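# Editor's note - illustrative sketch only, not part of the generated diff.
# Every single-output wrapper in this file is rewritten into the same three-step
# pattern shown in the reduce_* hunks above (names taken from the reduce_sum hunk;
# surrounding scaffolding elided):
#
#   input_prop_values = create_prop_dict(data=data, axes=axes)   # Var -> propagated values
#   node = _ReduceSum(
#       _ReduceSum.Attributes(...),                               # attributes unchanged
#       _ReduceSum.Inputs(
#           data=unwrap_vars(data),                               # Var -> _VarInfo
#           axes=unwrap_vars(axes),
#       ),
#   )
#   return node.get_output_vars(input_prop_values=input_prop_values).reduced   # back to Var
#
# The public signatures are untouched; only the internal plumbing switches to
# _VarInfo so that propagated values are not kept alive by the graph itself
# (see the changelog entry on splitting Var).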
.get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_sum_square( @@ -12182,15 +13174,22 @@ def reduce_sum_square( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceSumSquare( - _ReduceSumSquare.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceSumSquare.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _ReduceSumSquare( + _ReduceSumSquare.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceSumSquare.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def relu( @@ -12220,12 +13219,19 @@ def relu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` """ - return _Relu( - _Relu.Attributes(), - _Relu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Relu( + _Relu.Attributes(), + _Relu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def reshape( @@ -12279,15 +13285,23 @@ def reshape( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=data, - shape=shape, - ), - ).outputs.reshaped + input_prop_values = create_prop_dict( + data=data, + shape=shape, + ) + return ( + _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), + _Reshape.Inputs( + data=unwrap_vars(data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reshaped + ) def resize( @@ -12409,26 +13423,37 @@ def resize( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Resize( - _Resize.Attributes( - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" + input_prop_values = create_prop_dict( + X=X, + roi=roi, + scales=scales, + sizes=sizes, + ) + return ( + _Resize( + _Resize.Attributes( + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32( + extrapolation_value, name="extrapolation_value" + ), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - 
extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" + _Resize.Inputs( + X=unwrap_vars(X), + roi=unwrap_vars(roi), + scales=unwrap_vars(scales), + sizes=unwrap_vars(sizes), ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=X, - roi=roi, - scales=scales, - sizes=sizes, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def reverse_sequence( @@ -12493,16 +13518,24 @@ def reverse_sequence( Type constraints: - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReverseSequence( - _ReverseSequence.Attributes( - batch_axis=AttrInt64(batch_axis, name="batch_axis"), - time_axis=AttrInt64(time_axis, name="time_axis"), - ), - _ReverseSequence.Inputs( - input=input, - sequence_lens=sequence_lens, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + input=input, + sequence_lens=sequence_lens, + ) + return ( + _ReverseSequence( + _ReverseSequence.Attributes( + batch_axis=AttrInt64(batch_axis, name="batch_axis"), + time_axis=AttrInt64(time_axis, name="time_axis"), + ), + _ReverseSequence.Inputs( + input=unwrap_vars(input), + sequence_lens=unwrap_vars(sequence_lens), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def roi_align( @@ -12592,23 +13625,33 @@ def roi_align( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int64)` """ - return _RoiAlign( - _RoiAlign.Attributes( - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" - ), - mode=AttrString(mode, name="mode"), - output_height=AttrInt64(output_height, name="output_height"), - output_width=AttrInt64(output_width, name="output_width"), - sampling_ratio=AttrInt64(sampling_ratio, name="sampling_ratio"), - spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), - ), - _RoiAlign.Inputs( - X=X, - rois=rois, - batch_indices=batch_indices, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + rois=rois, + batch_indices=batch_indices, + ) + return ( + _RoiAlign( + _RoiAlign.Attributes( + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + mode=AttrString(mode, name="mode"), + output_height=AttrInt64(output_height, name="output_height"), + output_width=AttrInt64(output_width, name="output_width"), + sampling_ratio=AttrInt64(sampling_ratio, name="sampling_ratio"), + spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), + ), + _RoiAlign.Inputs( + X=unwrap_vars(X), + rois=unwrap_vars(rois), + batch_indices=unwrap_vars(batch_indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def round( @@ -12650,12 +13693,19 @@ def round( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Round( - _Round.Attributes(), - _Round.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Round( + _Round.Attributes(), + _Round.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def stft( @@ -12719,17 +13769,27 @@ def stft( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, 
`tensor(int64)` """ - return _STFT( - _STFT.Attributes( - onesided=AttrInt64(onesided, name="onesided"), - ), - _STFT.Inputs( - signal=signal, - frame_step=frame_step, - window=window, - frame_length=frame_length, - ), - ).outputs.output + input_prop_values = create_prop_dict( + signal=signal, + frame_step=frame_step, + window=window, + frame_length=frame_length, + ) + return ( + _STFT( + _STFT.Attributes( + onesided=AttrInt64(onesided, name="onesided"), + ), + _STFT.Inputs( + signal=unwrap_vars(signal), + frame_step=unwrap_vars(frame_step), + window=unwrap_vars(window), + frame_length=unwrap_vars(frame_length), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def scan( @@ -12953,26 +14013,37 @@ def scan( ], body, ) - return _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" + input_prop_values = create_prop_dict( + initial_state_and_scan_inputs=initial_state_and_scan_inputs, + ) + return ( + _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe( + scan_input_axes, name="scan_input_axes" + ), + scan_input_directions=AttrInt64s.maybe( + scan_input_directions, name="scan_input_directions" + ), + scan_output_axes=AttrInt64s.maybe( + scan_output_axes, name="scan_output_axes" + ), + scan_output_directions=AttrInt64s.maybe( + scan_output_directions, name="scan_output_directions" + ), ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" + _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars( + initial_state_and_scan_inputs + ), ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=initial_state_and_scan_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.final_state_and_scan_outputs + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .final_state_and_scan_outputs + ) def scatter_elements( @@ -13095,17 +14166,26 @@ def scatter_elements( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _ScatterElements( - _ScatterElements.Attributes( - axis=AttrInt64(axis, name="axis"), - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterElements.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + return ( + _ScatterElements( + _ScatterElements.Attributes( + axis=AttrInt64(axis, name="axis"), + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterElements.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def scatter_nd( @@ -13217,16 +14297,25 @@ def scatter_nd( Type constraints: - T: `tensor(bfloat16)`, 
`tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ScatterND( - _ScatterND.Attributes( - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterND.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + return ( + _ScatterND( + _ScatterND.Attributes( + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterND.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def selu( @@ -13268,15 +14357,22 @@ def selu( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Selu( - _Selu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - gamma=AttrFloat32(gamma, name="gamma"), - ), - _Selu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Selu( + _Selu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + gamma=AttrFloat32(gamma, name="gamma"), + ), + _Selu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def sequence_at( @@ -13317,13 +14413,21 @@ def sequence_at( - I: `tensor(int32)`, `tensor(int64)` - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _SequenceAt( - _SequenceAt.Attributes(), - _SequenceAt.Inputs( - input_sequence=input_sequence, - position=position, - ), - ).outputs.tensor + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + position=position, + ) + return ( + _SequenceAt( + _SequenceAt.Attributes(), + _SequenceAt.Inputs( + input_sequence=unwrap_vars(input_sequence), + position=unwrap_vars(position), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .tensor + ) def sequence_construct( @@ -13353,12 +14457,19 @@ def sequence_construct( - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` """ - return _SequenceConstruct( - _SequenceConstruct.Attributes(), - _SequenceConstruct.Inputs( - inputs=inputs, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + inputs=inputs, + ) + return ( + _SequenceConstruct( + _SequenceConstruct.Attributes(), + _SequenceConstruct.Inputs( + inputs=unwrap_vars(inputs), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) def sequence_empty( @@ -13388,12 +14499,17 @@ def sequence_empty( Type constraints: - S: `seq(tensor(bool))`, 
`seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` """ - return _SequenceEmpty( - _SequenceEmpty.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - ), - _SequenceEmpty.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + return ( + _SequenceEmpty( + _SequenceEmpty.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + ), + _SequenceEmpty.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def sequence_erase( @@ -13434,13 +14550,21 @@ def sequence_erase( - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - I: `tensor(int32)`, `tensor(int64)` """ - return _SequenceErase( - _SequenceErase.Attributes(), - _SequenceErase.Inputs( - input_sequence=input_sequence, - position=position, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + position=position, + ) + return ( + _SequenceErase( + _SequenceErase.Attributes(), + _SequenceErase.Inputs( + input_sequence=unwrap_vars(input_sequence), + position=unwrap_vars(position), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) def sequence_insert( @@ -13488,14 +14612,23 @@ def sequence_insert( - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - I: `tensor(int32)`, `tensor(int64)` """ - return _SequenceInsert( - _SequenceInsert.Attributes(), - _SequenceInsert.Inputs( - input_sequence=input_sequence, - tensor=tensor, - position=position, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + tensor=tensor, + position=position, + ) + return ( + _SequenceInsert( + _SequenceInsert.Attributes(), + _SequenceInsert.Inputs( + input_sequence=unwrap_vars(input_sequence), + tensor=unwrap_vars(tensor), + position=unwrap_vars(position), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) def sequence_length( @@ -13525,12 +14658,19 @@ def sequence_length( - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - I: `tensor(int64)` """ - return _SequenceLength( - _SequenceLength.Attributes(), - _SequenceLength.Inputs( - input_sequence=input_sequence, - ), - ).outputs.length + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + ) + return ( + _SequenceLength( + _SequenceLength.Attributes(), + _SequenceLength.Inputs( + input_sequence=unwrap_vars(input_sequence), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .length + ) def 
sequence_map( @@ -13593,16 +14733,24 @@ def sequence_map( ], body, ) - return _SequenceMap( - _SequenceMap.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _SequenceMap.Inputs( - input_sequence=input_sequence, - additional_inputs=additional_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.out_sequence + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + additional_inputs=additional_inputs, + ) + return ( + _SequenceMap( + _SequenceMap.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _SequenceMap.Inputs( + input_sequence=unwrap_vars(input_sequence), + additional_inputs=unwrap_vars(additional_inputs), + ), + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .out_sequence + ) def shape( @@ -13681,15 +14829,22 @@ def shape( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=data, - ), - ).outputs.shape + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), + _Shape.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .shape + ) def shrink( @@ -13729,15 +14884,22 @@ def shrink( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Shrink( - _Shrink.Attributes( - bias=AttrFloat32(bias, name="bias"), - lambd=AttrFloat32(lambd, name="lambd"), - ), - _Shrink.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Shrink( + _Shrink.Attributes( + bias=AttrFloat32(bias, name="bias"), + lambd=AttrFloat32(lambd, name="lambd"), + ), + _Shrink.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def sigmoid( @@ -13767,12 +14929,19 @@ def sigmoid( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sigmoid( - _Sigmoid.Attributes(), - _Sigmoid.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Sigmoid( + _Sigmoid.Attributes(), + _Sigmoid.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def sign( @@ -13802,12 +14971,19 @@ def sign( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Sign( - _Sign.Attributes(), - _Sign.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Sign( + _Sign.Attributes(), + _Sign.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def sin( @@ -13835,12 +15011,19 @@ def sin( Type 
constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sin( - _Sin.Attributes(), - _Sin.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Sin( + _Sin.Attributes(), + _Sin.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def sinh( @@ -13868,12 +15051,19 @@ def sinh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sinh( - _Sinh.Attributes(), - _Sinh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Sinh( + _Sinh.Attributes(), + _Sinh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def size( @@ -13903,12 +15093,19 @@ def size( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Size( - _Size.Attributes(), - _Size.Inputs( - data=data, - ), - ).outputs.size + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Size( + _Size.Attributes(), + _Size.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .size + ) def slice( @@ -14024,16 +15221,27 @@ def slice( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Slice( - _Slice.Attributes(), - _Slice.Inputs( - data=data, - starts=starts, - ends=ends, - axes=axes, - steps=steps, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + starts=starts, + ends=ends, + axes=axes, + steps=steps, + ) + return ( + _Slice( + _Slice.Attributes(), + _Slice.Inputs( + data=unwrap_vars(data), + starts=unwrap_vars(starts), + ends=unwrap_vars(ends), + axes=unwrap_vars(axes), + steps=unwrap_vars(steps), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def softmax( @@ -14076,14 +15284,21 @@ def softmax( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Softmax( - _Softmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Softmax.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Softmax( + _Softmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Softmax.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def softmax_cross_entropy_loss( @@ -14195,17 +15410,26 @@ def softmax_cross_entropy_loss( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _SoftmaxCrossEntropyLoss( - _SoftmaxCrossEntropyLoss.Attributes( - ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), - reduction=AttrString(reduction, name="reduction"), - ), - _SoftmaxCrossEntropyLoss.Inputs( - scores=scores, - labels=labels, - weights=weights, - ), - ).outputs._unpack_to_any() + input_prop_values = 
create_prop_dict( + scores=scores, + labels=labels, + weights=weights, + ) + return ( + _SoftmaxCrossEntropyLoss( + _SoftmaxCrossEntropyLoss.Attributes( + ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), + reduction=AttrString(reduction, name="reduction"), + ), + _SoftmaxCrossEntropyLoss.Inputs( + scores=unwrap_vars(scores), + labels=unwrap_vars(labels), + weights=unwrap_vars(weights), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def softplus( @@ -14235,12 +15459,19 @@ def softplus( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Softplus( - _Softplus.Attributes(), - _Softplus.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Softplus( + _Softplus.Attributes(), + _Softplus.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def softsign( @@ -14270,12 +15501,19 @@ def softsign( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Softsign( - _Softsign.Attributes(), - _Softsign.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Softsign( + _Softsign.Attributes(), + _Softsign.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def space_to_depth( @@ -14312,14 +15550,21 @@ def space_to_depth( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _SpaceToDepth( - _SpaceToDepth.Attributes( - blocksize=AttrInt64(blocksize, name="blocksize"), - ), - _SpaceToDepth.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _SpaceToDepth( + _SpaceToDepth.Attributes( + blocksize=AttrInt64(blocksize, name="blocksize"), + ), + _SpaceToDepth.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def split( @@ -14364,16 +15609,24 @@ def split( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Split( - _Split.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Split.Inputs( - input=input, - split=split, - ), - out_variadic=outputs_count, - ).outputs.outputs + input_prop_values = create_prop_dict( + input=input, + split=split, + ) + return ( + _Split( + _Split.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Split.Inputs( + input=unwrap_vars(input), + split=unwrap_vars(split), + ), + out_variadic=outputs_count, + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) def split_to_sequence( @@ -14431,16 +15684,24 @@ def split_to_sequence( - I: `tensor(int32)`, `tensor(int64)` - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, 
`seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` """ - return _SplitToSequence( - _SplitToSequence.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _SplitToSequence.Inputs( - input=input, - split=split, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + input=input, + split=split, + ) + return ( + _SplitToSequence( + _SplitToSequence.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _SplitToSequence.Inputs( + input=unwrap_vars(input), + split=unwrap_vars(split), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) def sqrt( @@ -14470,12 +15731,19 @@ def sqrt( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sqrt( - _Sqrt.Attributes(), - _Sqrt.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Sqrt( + _Sqrt.Attributes(), + _Sqrt.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def squeeze( @@ -14513,13 +15781,21 @@ def squeeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Squeeze( - _Squeeze.Attributes(), - _Squeeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.squeezed + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _Squeeze( + _Squeeze.Attributes(), + _Squeeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .squeezed + ) def string_normalizer( @@ -14574,19 +15850,28 @@ def string_normalizer( Signature: ``ai.onnx@10::StringNormalizer``. 
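# Editor's note - illustrative sketch only, not part of the generated diff.
# Wrappers with several or a variadic number of results follow the same rewrite;
# they just read more than one field from the output bundle. Condensed from the
# split and softmax_cross_entropy_loss hunks above (ellipses stand for the
# unchanged attribute arguments):
#
#   node = _Split(..., _Split.Inputs(input=unwrap_vars(input), split=unwrap_vars(split)),
#                 out_variadic=outputs_count)
#   return node.get_output_vars(input_prop_values=input_prop_values).outputs      # Sequence of Vars
#
#   node = _SoftmaxCrossEntropyLoss(..., _SoftmaxCrossEntropyLoss.Inputs(...))
#   return node.get_output_vars(input_prop_values=input_prop_values)._unpack_to_any()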
""" - return _StringNormalizer( - _StringNormalizer.Attributes( - case_change_action=AttrString( - case_change_action, name="case_change_action" + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _StringNormalizer( + _StringNormalizer.Attributes( + case_change_action=AttrString( + case_change_action, name="case_change_action" + ), + is_case_sensitive=AttrInt64( + is_case_sensitive, name="is_case_sensitive" + ), + locale=AttrString.maybe(locale, name="locale"), + stopwords=AttrStrings.maybe(stopwords, name="stopwords"), + ), + _StringNormalizer.Inputs( + X=unwrap_vars(X), ), - is_case_sensitive=AttrInt64(is_case_sensitive, name="is_case_sensitive"), - locale=AttrString.maybe(locale, name="locale"), - stopwords=AttrStrings.maybe(stopwords, name="stopwords"), - ), - _StringNormalizer.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def sub( @@ -14626,13 +15911,21 @@ def sub( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Sub( - _Sub.Attributes(), - _Sub.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Sub( + _Sub.Attributes(), + _Sub.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def sum( @@ -14664,12 +15957,19 @@ def sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sum( - _Sum.Attributes(), - _Sum.Inputs( - data_0=data_0, - ), - ).outputs.sum + input_prop_values = create_prop_dict( + data_0=data_0, + ) + return ( + _Sum( + _Sum.Attributes(), + _Sum.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .sum + ) def tan( @@ -14697,12 +15997,19 @@ def tan( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Tan( - _Tan.Attributes(), - _Tan.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Tan( + _Tan.Attributes(), + _Tan.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def tanh( @@ -14731,12 +16038,19 @@ def tanh( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Tanh( - _Tanh.Attributes(), - _Tanh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Tanh( + _Tanh.Attributes(), + _Tanh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def tf_idf_vectorizer( @@ -14868,22 +16182,29 @@ def tf_idf_vectorizer( - T: `tensor(int32)`, `tensor(int64)`, `tensor(string)` - T1: `tensor(float)` """ - return _TfIdfVectorizer( - _TfIdfVectorizer.Attributes( - max_gram_length=AttrInt64(max_gram_length, name="max_gram_length"), - max_skip_count=AttrInt64(max_skip_count, name="max_skip_count"), - min_gram_length=AttrInt64(min_gram_length, name="min_gram_length"), - mode=AttrString(mode, name="mode"), - ngram_counts=AttrInt64s(ngram_counts, name="ngram_counts"), - ngram_indexes=AttrInt64s(ngram_indexes, name="ngram_indexes"), - pool_int64s=AttrInt64s.maybe(pool_int64s, name="pool_int64s"), - pool_strings=AttrStrings.maybe(pool_strings, 
name="pool_strings"), - weights=AttrFloat32s.maybe(weights, name="weights"), - ), - _TfIdfVectorizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _TfIdfVectorizer( + _TfIdfVectorizer.Attributes( + max_gram_length=AttrInt64(max_gram_length, name="max_gram_length"), + max_skip_count=AttrInt64(max_skip_count, name="max_skip_count"), + min_gram_length=AttrInt64(min_gram_length, name="min_gram_length"), + mode=AttrString(mode, name="mode"), + ngram_counts=AttrInt64s(ngram_counts, name="ngram_counts"), + ngram_indexes=AttrInt64s(ngram_indexes, name="ngram_indexes"), + pool_int64s=AttrInt64s.maybe(pool_int64s, name="pool_int64s"), + pool_strings=AttrStrings.maybe(pool_strings, name="pool_strings"), + weights=AttrFloat32s.maybe(weights, name="weights"), + ), + _TfIdfVectorizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def thresholded_relu( @@ -14918,14 +16239,21 @@ def thresholded_relu( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _ThresholdedRelu( - _ThresholdedRelu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _ThresholdedRelu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _ThresholdedRelu( + _ThresholdedRelu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _ThresholdedRelu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def tile( @@ -14962,13 +16290,21 @@ def tile( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Tile( - _Tile.Attributes(), - _Tile.Inputs( - input=input, - repeats=repeats, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + repeats=repeats, + ) + return ( + _Tile( + _Tile.Attributes(), + _Tile.Inputs( + input=unwrap_vars(input), + repeats=unwrap_vars(repeats), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def top_k( @@ -15046,17 +16382,25 @@ def top_k( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - I: `tensor(int64)` """ - return _TopK( - _TopK.Attributes( - axis=AttrInt64(axis, name="axis"), - largest=AttrInt64(largest, name="largest"), - sorted=AttrInt64(sorted, name="sorted"), - ), - _TopK.Inputs( - X=X, - K=K, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + K=K, + ) + return ( + _TopK( + _TopK.Attributes( + axis=AttrInt64(axis, name="axis"), + largest=AttrInt64(largest, name="largest"), + sorted=AttrInt64(sorted, name="sorted"), + ), + _TopK.Inputs( + X=unwrap_vars(X), + K=unwrap_vars(K), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def transpose( @@ -15092,14 +16436,21 @@ def transpose( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Transpose( - 
_Transpose.Attributes( - perm=AttrInt64s.maybe(perm, name="perm"), - ), - _Transpose.Inputs( - data=data, - ), - ).outputs.transposed + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Transpose( + _Transpose.Attributes( + perm=AttrInt64s.maybe(perm, name="perm"), + ), + _Transpose.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .transposed + ) def trilu( @@ -15155,15 +16506,23 @@ def trilu( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Trilu( - _Trilu.Attributes( - upper=AttrInt64(upper, name="upper"), - ), - _Trilu.Inputs( - input=input, - k=k, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + k=k, + ) + return ( + _Trilu( + _Trilu.Attributes( + upper=AttrInt64(upper, name="upper"), + ), + _Trilu.Inputs( + input=unwrap_vars(input), + k=unwrap_vars(k), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def unique( @@ -15335,15 +16694,22 @@ def unique( Type constraints: - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Unique( - _Unique.Attributes( - axis=AttrInt64.maybe(axis, name="axis"), - sorted=AttrInt64(sorted, name="sorted"), - ), - _Unique.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Unique( + _Unique.Attributes( + axis=AttrInt64.maybe(axis, name="axis"), + sorted=AttrInt64(sorted, name="sorted"), + ), + _Unique.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def unsqueeze( @@ -15391,13 +16757,21 @@ def unsqueeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Unsqueeze( - _Unsqueeze.Attributes(), - _Unsqueeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.expanded + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _Unsqueeze( + _Unsqueeze.Attributes(), + _Unsqueeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .expanded + ) def where( @@ -15441,14 +16815,23 @@ def where( - B: `tensor(bool)` - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Where( - _Where.Attributes(), - _Where.Inputs( - condition=condition, - X=X, - Y=Y, - ), - ).outputs.output + input_prop_values = create_prop_dict( + condition=condition, + X=X, + Y=Y, + ) + return ( + _Where( + _Where.Attributes(), + _Where.Inputs( + condition=unwrap_vars(condition), + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .output + ) def xor( @@ -15487,13 +16870,21 @@ def xor( - T: `tensor(bool)` - T1: `tensor(bool)` """ - return _Xor( - _Xor.Attributes(), - _Xor.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Xor( + _Xor.Attributes(), + _Xor.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v18.py b/src/spox/opset/ai/onnx/v18.py index 028c0775..031c2541 100644 --- a/src/spox/opset/ai/onnx/v18.py +++ b/src/spox/opset/ai/onnx/v18.py @@ -20,7 +20,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v17 import ( _DFT, _GRU, @@ -350,12 +350,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("BitwiseAnd", "", 18) @@ -371,11 +371,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("BitwiseNot", "", 18) @@ -391,12 +391,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("BitwiseOr", "", 18) @@ -412,12 +412,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("BitwiseXor", "", 18) @@ -433,12 +433,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_data: Var - shape: Var + input_data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - output_data: Var + output_data: _VarInfo op_type = OpType("CenterCropPad", "", 18) @@ -456,13 +456,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - image_shape: Var - block_shape: Var + input: _VarInfo + image_shape: _VarInfo + block_shape: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Col2Im", "", 18) @@ -479,13 +479,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - scale: Var - bias: Var + X: _VarInfo + scale: _VarInfo + bias: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GroupNormalization", "", 18) @@ -507,11 +507,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LpPool", "", 18) @@ -527,11 +527,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Mish", "", 18) @@ -547,11 +547,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalGetElement", "", 18) @@ -567,11 +567,11 @@ class 
Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Optional[Var] + input: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalHasElement", "", 18) @@ -587,14 +587,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] - axes: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 18) @@ -611,12 +611,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL1", "", 18) @@ -633,12 +633,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL2", "", 18) @@ -655,12 +655,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSum", "", 18) @@ -677,12 +677,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSumExp", "", 18) @@ -699,12 +699,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMax", "", 18) @@ -721,12 +721,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMean", "", 18) @@ -743,12 +743,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMin", "", 18) @@ -765,12 +765,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceProd", "", 18) @@ -787,12 +787,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceSumSquare", "", 18) @@ -816,14 +816,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - roi: Optional[Var] - scales: Optional[Var] - sizes: Optional[Var] + X: _VarInfo + roi: Optional[_VarInfo] + scales: Optional[_VarInfo] + sizes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Resize", "", 18) @@ -840,13 +840,13 
@@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterElements", "", 18) @@ -862,13 +862,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterND", "", 18) @@ -885,12 +885,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - split: Optional[Var] + input: _VarInfo + split: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Split", "", 18) @@ -934,13 +934,21 @@ def bitwise_and( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseAnd( - _BitwiseAnd.Attributes(), - _BitwiseAnd.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _BitwiseAnd( + _BitwiseAnd.Attributes(), + _BitwiseAnd.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def bitwise_not( @@ -968,12 +976,19 @@ def bitwise_not( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseNot( - _BitwiseNot.Attributes(), - _BitwiseNot.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _BitwiseNot( + _BitwiseNot.Attributes(), + _BitwiseNot.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def bitwise_or( @@ -1011,13 +1026,21 @@ def bitwise_or( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseOr( - _BitwiseOr.Attributes(), - _BitwiseOr.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _BitwiseOr( + _BitwiseOr.Attributes(), + _BitwiseOr.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def bitwise_xor( @@ -1055,13 +1078,21 @@ def bitwise_xor( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseXor( - _BitwiseXor.Attributes(), - _BitwiseXor.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _BitwiseXor( + _BitwiseXor.Attributes(), + _BitwiseXor.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def center_crop_pad( @@ -1111,15 +1142,23 @@ def center_crop_pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return 
_CenterCropPad( - _CenterCropPad.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - ), - _CenterCropPad.Inputs( - input_data=input_data, - shape=shape, - ), - ).outputs.output_data + input_prop_values = create_prop_dict( + input_data=input_data, + shape=shape, + ) + return ( + _CenterCropPad( + _CenterCropPad.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + ), + _CenterCropPad.Inputs( + input_data=unwrap_vars(input_data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_data + ) def col2_im( @@ -1202,18 +1241,27 @@ def col2_im( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Col2Im( - _Col2Im.Attributes( - dilations=AttrInt64s.maybe(dilations, name="dilations"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _Col2Im.Inputs( - input=input, - image_shape=image_shape, - block_shape=block_shape, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + image_shape=image_shape, + block_shape=block_shape, + ) + return ( + _Col2Im( + _Col2Im.Attributes( + dilations=AttrInt64s.maybe(dilations, name="dilations"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _Col2Im.Inputs( + input=unwrap_vars(input), + image_shape=unwrap_vars(image_shape), + block_shape=unwrap_vars(block_shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def group_normalization( @@ -1281,17 +1329,26 @@ def group_normalization( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GroupNormalization( - _GroupNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - num_groups=AttrInt64(num_groups, name="num_groups"), - ), - _GroupNormalization.Inputs( - X=X, - scale=scale, - bias=bias, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + scale=scale, + bias=bias, + ) + return ( + _GroupNormalization( + _GroupNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + num_groups=AttrInt64(num_groups, name="num_groups"), + ), + _GroupNormalization.Inputs( + X=unwrap_vars(X), + scale=unwrap_vars(scale), + bias=unwrap_vars(bias), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def lp_pool( @@ -1402,20 +1459,27 @@ def lp_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LpPool( - _LpPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - p=AttrInt64(p, name="p"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _LpPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _LpPool( + _LpPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + p=AttrInt64(p, name="p"), + 
pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _LpPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def mish( @@ -1450,12 +1514,19 @@ def mish( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Mish( - _Mish.Attributes(), - _Mish.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Mish( + _Mish.Attributes(), + _Mish.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def optional_get_element( @@ -1487,12 +1558,19 @@ def optional_get_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _OptionalGetElement( - _OptionalGetElement.Attributes(), - _OptionalGetElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _OptionalGetElement( + _OptionalGetElement.Attributes(), + _OptionalGetElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def optional_has_element( @@ -1524,12 +1602,19 @@ def optional_has_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, 
`optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - B: `tensor(bool)` """ - return _OptionalHasElement( - _OptionalHasElement.Attributes(), - _OptionalHasElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _OptionalHasElement( + _OptionalHasElement.Attributes(), + _OptionalHasElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def pad( @@ -1666,17 +1751,27 @@ def pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - axes=axes, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + axes=axes, + ) + return ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def reduce_l1( @@ -1732,18 +1827,26 @@ def reduce_l1( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL1( - _ReduceL1.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceL1( + _ReduceL1.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceL1.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceL1.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_l2( @@ -1799,18 +1902,26 @@ def reduce_l2( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL2( - _ReduceL2.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceL2( + _ReduceL2.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceL2.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceL2.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_log_sum( @@ -1867,18 +1978,26 @@ def reduce_log_sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSum( - _ReduceLogSum.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceLogSum( + _ReduceLogSum.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceLogSum.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceLogSum.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_log_sum_exp( @@ -1935,18 +2054,26 @@ def reduce_log_sum_exp( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSumExp( - _ReduceLogSumExp.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceLogSumExp( + _ReduceLogSumExp.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceLogSumExp.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceLogSumExp.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_max( @@ -2004,18 +2131,26 @@ def reduce_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMax( - _ReduceMax.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceMax( + _ReduceMax.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - 
_ReduceMax.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceMax.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_mean( @@ -2071,18 +2206,26 @@ def reduce_mean( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceMean( - _ReduceMean.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceMean( + _ReduceMean.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceMean.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceMean.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_min( @@ -2139,18 +2282,26 @@ def reduce_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMin( - _ReduceMin.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceMin( + _ReduceMin.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceMin.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceMin.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_prod( @@ -2206,18 +2357,26 @@ def reduce_prod( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceProd( - _ReduceProd.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceProd( + _ReduceProd.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceProd.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceProd.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_sum_square( @@ -2273,18 +2432,26 @@ def reduce_sum_square( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceSumSquare( - _ReduceSumSquare.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceSumSquare( + _ReduceSumSquare.Attributes( + 
keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceSumSquare.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceSumSquare.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def resize( @@ -2457,31 +2624,42 @@ def resize( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Resize( - _Resize.Attributes( - antialias=AttrInt64(antialias, name="antialias"), - axes=AttrInt64s.maybe(axes, name="axes"), - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" + input_prop_values = create_prop_dict( + X=X, + roi=roi, + scales=scales, + sizes=sizes, + ) + return ( + _Resize( + _Resize.Attributes( + antialias=AttrInt64(antialias, name="antialias"), + axes=AttrInt64s.maybe(axes, name="axes"), + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32( + extrapolation_value, name="extrapolation_value" + ), + keep_aspect_ratio_policy=AttrString( + keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + ), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), ), - keep_aspect_ratio_policy=AttrString( - keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + _Resize.Inputs( + X=unwrap_vars(X), + roi=unwrap_vars(roi), + scales=unwrap_vars(scales), + sizes=unwrap_vars(sizes), ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=X, - roi=roi, - scales=scales, - sizes=sizes, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def scatter_elements( @@ -2607,17 +2785,26 @@ def scatter_elements( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _ScatterElements( - _ScatterElements.Attributes( - axis=AttrInt64(axis, name="axis"), - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterElements.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + return ( + _ScatterElements( + _ScatterElements.Attributes( + axis=AttrInt64(axis, name="axis"), + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterElements.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + 
updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def scatter_nd( @@ -2745,16 +2932,25 @@ def scatter_nd( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ScatterND( - _ScatterND.Attributes( - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterND.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + return ( + _ScatterND( + _ScatterND.Attributes( + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterND.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def split( @@ -2804,17 +3000,25 @@ def split( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Split( - _Split.Attributes( - axis=AttrInt64(axis, name="axis"), - num_outputs=AttrInt64.maybe(num_outputs, name="num_outputs"), - ), - _Split.Inputs( - input=input, - split=split, - ), - out_variadic=num_outputs, - ).outputs.outputs + input_prop_values = create_prop_dict( + input=input, + split=split, + ) + return ( + _Split( + _Split.Attributes( + axis=AttrInt64(axis, name="axis"), + num_outputs=AttrInt64.maybe(num_outputs, name="num_outputs"), + ), + _Split.Inputs( + input=unwrap_vars(input), + split=unwrap_vars(split), + ), + out_variadic=num_outputs, + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v19.py b/src/spox/opset/ai/onnx/v19.py index 6c14823d..b9f5f915 100644 --- a/src/spox/opset/ai/onnx/v19.py +++ b/src/spox/opset/ai/onnx/v19.py @@ -29,8 +29,8 @@ from spox._node import OpType from spox._standard import StandardNode from spox._type_system import Tensor, Type -from spox._value_prop import PropValueType -from spox._var import Var +from spox._value_prop import PropDict, PropValueType +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v18 import ( _DFT, _GRU, @@ -384,11 +384,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("AveragePool", "", 19) @@ -405,11 +405,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cast", "", 19) @@ -425,12 +425,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target_type: Var + input: _VarInfo + target_type: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("CastLike", "", 19) @@ -454,9 +454,9 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - 
def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: ((key, raw),) = ( (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None ) @@ -501,15 +501,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - offset: Var - B: Optional[Var] - mask: Optional[Var] + X: _VarInfo + W: _VarInfo + offset: _VarInfo + B: Optional[_VarInfo] + mask: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("DeformConv", "", 19) @@ -525,13 +525,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("DequantizeLinear", "", 19) @@ -547,12 +547,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Equal", "", 19) @@ -568,11 +568,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Identity", "", 19) @@ -589,11 +589,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - cond: Var + cond: _VarInfo @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("If", "", 19) @@ -609,13 +609,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - M: Optional[Var] - cond: Optional[Var] - v_initial: Sequence[Var] + M: Optional[_VarInfo] + cond: Optional[_VarInfo] + v_initial: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - v_final_and_scan_outputs: Sequence[Var] + v_final_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Loop", "", 19) @@ -631,14 +631,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] - axes: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 19) @@ -655,13 +655,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - y_scale: Var - y_zero_point: Optional[Var] + x: _VarInfo + y_scale: _VarInfo + y_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QuantizeLinear", "", 19) @@ -677,12 +677,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - shape: Var + data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - reshaped: Var + reshaped: _VarInfo op_type = OpType("Reshape", "", 19) @@ -706,14 +706,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - roi: Optional[Var] - scales: Optional[Var] - sizes: Optional[Var] + X: _VarInfo + roi: Optional[_VarInfo] + scales: Optional[_VarInfo] + sizes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Resize", "", 19) @@ -734,11 +734,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - initial_state_and_scan_inputs: Sequence[Var] + initial_state_and_scan_inputs: 
Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - final_state_and_scan_outputs: Sequence[Var] + final_state_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Scan", "", 19) @@ -755,11 +755,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - shape: Var + shape: _VarInfo op_type = OpType("Shape", "", 19) @@ -775,11 +775,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - size: Var + size: _VarInfo op_type = OpType("Size", "", 19) @@ -915,20 +915,29 @@ def average_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _AveragePool( - _AveragePool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - count_include_pad=AttrInt64(count_include_pad, name="count_include_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _AveragePool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _AveragePool( + _AveragePool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + count_include_pad=AttrInt64( + count_include_pad, name="count_include_pad" + ), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _AveragePool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def cast( @@ -1052,15 +1061,22 @@ def cast( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Cast( - _Cast.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Cast( + _Cast.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + to=AttrDtype(to, name="to"), + ), + _Cast.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def cast_like( @@ -1106,15 +1122,23 @@ def cast_like( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, 
`tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _CastLike( - _CastLike.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - ), - _CastLike.Inputs( - input=input, - target_type=target_type, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + target_type=target_type, + ) + return ( + _CastLike( + _CastLike.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + ), + _CastLike.Inputs( + input=unwrap_vars(input), + target_type=unwrap_vars(target_type), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def constant( @@ -1172,18 +1196,23 @@ def constant( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + return ( + _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), + _Constant.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def deform_conv( @@ -1282,23 +1311,34 @@ def deform_conv( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _DeformConv( - _DeformConv.Attributes( - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - offset_group=AttrInt64(offset_group, name="offset_group"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _DeformConv.Inputs( - X=X, - W=W, - offset=offset, - B=B, - mask=mask, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + W=W, + offset=offset, + B=B, + mask=mask, + ) + return ( + _DeformConv( + _DeformConv.Attributes( + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + offset_group=AttrInt64(offset_group, name="offset_group"), + pads=AttrInt64s.maybe(pads, name="pads"), + 
strides=AttrInt64s.maybe(strides, name="strides"), + ), + _DeformConv.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + offset=unwrap_vars(offset), + B=unwrap_vars(B), + mask=unwrap_vars(mask), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def dequantize_linear( @@ -1358,16 +1398,25 @@ def dequantize_linear( - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` """ - return _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _DequantizeLinear.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + ) + return ( + _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _DequantizeLinear.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def equal( @@ -1406,13 +1455,21 @@ def equal( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Equal( - _Equal.Attributes(), - _Equal.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + return ( + _Equal( + _Equal.Attributes(), + _Equal.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) def identity( @@ -1440,12 +1497,19 @@ def identity( Type constraints: - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Identity( - 
_Identity.Attributes(), - _Identity.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Identity( + _Identity.Attributes(), + _Identity.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def if_( @@ -1502,16 +1566,23 @@ def if_( """ _else_branch_subgraph: Graph = subgraph((), else_branch) _then_branch_subgraph: Graph = subgraph((), then_branch) - return _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=cond, - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ).outputs.outputs + input_prop_values = create_prop_dict( + cond=cond, + ) + return ( + _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), + _If.Inputs( + cond=unwrap_vars(cond), + ), + out_variadic=len(_else_branch_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) def loop( @@ -1695,17 +1766,26 @@ def loop( + [var.unwrap_type() for var in v_initial], body, ) - return _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=M, - cond=cond, - v_initial=v_initial, - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ).outputs.v_final_and_scan_outputs + input_prop_values = create_prop_dict( + M=M, + cond=cond, + v_initial=v_initial, + ) + return ( + _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _Loop.Inputs( + M=unwrap_vars(M), + cond=unwrap_vars(cond), + v_initial=unwrap_vars(v_initial), + ), + out_variadic=len(_body_subgraph.requested_results) - 1, + ) + .get_output_vars(input_prop_values=input_prop_values) + .v_final_and_scan_outputs + ) def pad( @@ -1868,17 +1948,27 @@ def pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - axes=axes, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + axes=axes, + ) + return ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def quantize_linear( @@ -1947,17 +2037,26 @@ def quantize_linear( - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` """ - return _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - saturate=AttrInt64(saturate, name="saturate"), - ), - _QuantizeLinear.Inputs( - x=x, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + y_scale=y_scale, + 
y_zero_point=y_zero_point, + ) + return ( + _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + saturate=AttrInt64(saturate, name="saturate"), + ), + _QuantizeLinear.Inputs( + x=unwrap_vars(x), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def reshape( @@ -2011,15 +2110,23 @@ def reshape( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=data, - shape=shape, - ), - ).outputs.reshaped + input_prop_values = create_prop_dict( + data=data, + shape=shape, + ) + return ( + _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), + _Reshape.Inputs( + data=unwrap_vars(data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reshaped + ) def resize( @@ -2230,31 +2337,42 @@ def resize( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Resize( - _Resize.Attributes( - antialias=AttrInt64(antialias, name="antialias"), - axes=AttrInt64s.maybe(axes, name="axes"), - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" + input_prop_values = create_prop_dict( + X=X, + roi=roi, + scales=scales, + sizes=sizes, + ) + return ( + _Resize( + _Resize.Attributes( + antialias=AttrInt64(antialias, name="antialias"), + axes=AttrInt64s.maybe(axes, name="axes"), + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32( + extrapolation_value, name="extrapolation_value" + ), + keep_aspect_ratio_policy=AttrString( + keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + ), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), ), - keep_aspect_ratio_policy=AttrString( - keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + _Resize.Inputs( + X=unwrap_vars(X), + roi=unwrap_vars(roi), + scales=unwrap_vars(scales), + sizes=unwrap_vars(sizes), ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=X, - roi=roi, - scales=scales, - sizes=sizes, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def scan( @@ -2478,26 +2596,37 @@ def scan( ], body, ) 
- return _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" + input_prop_values = create_prop_dict( + initial_state_and_scan_inputs=initial_state_and_scan_inputs, + ) + return ( + _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe( + scan_input_axes, name="scan_input_axes" + ), + scan_input_directions=AttrInt64s.maybe( + scan_input_directions, name="scan_input_directions" + ), + scan_output_axes=AttrInt64s.maybe( + scan_output_axes, name="scan_output_axes" + ), + scan_output_directions=AttrInt64s.maybe( + scan_output_directions, name="scan_output_directions" + ), ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" + _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars( + initial_state_and_scan_inputs + ), ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=initial_state_and_scan_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.final_state_and_scan_outputs + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .final_state_and_scan_outputs + ) def shape( @@ -2576,15 +2705,22 @@ def shape( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=data, - ), - ).outputs.shape + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), + _Shape.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .shape + ) def size( @@ -2614,12 +2750,19 @@ def size( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Size( - _Size.Attributes(), - _Size.Inputs( - data=data, - ), - ).outputs.size + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Size( + _Size.Attributes(), + _Size.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .size + ) def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v20.py b/src/spox/opset/ai/onnx/v20.py index fa5a4c42..c228902c 100644 --- a/src/spox/opset/ai/onnx/v20.py +++ b/src/spox/opset/ai/onnx/v20.py @@ 
-18,7 +18,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v19 import ( _GRU, _LRN, @@ -386,12 +386,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - theta: Var - size: Var + theta: _VarInfo + size: _VarInfo @dataclass class Outputs(BaseOutputs): - grid: Var + grid: _VarInfo op_type = OpType("AffineGrid", "", 20) @@ -407,11 +407,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ConstantOfShape", "", 20) @@ -428,13 +428,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - dft_length: Optional[Var] - axis: Optional[Var] + input: _VarInfo + dft_length: Optional[_VarInfo] + axis: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("DFT", "", 20) @@ -450,11 +450,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Gelu", "", 20) @@ -472,12 +472,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - grid: Var + X: _VarInfo + grid: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GridSample", "", 20) @@ -493,11 +493,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - encoded_stream: Var + encoded_stream: _VarInfo @dataclass class Outputs(BaseOutputs): - image: Var + image: _VarInfo op_type = OpType("ImageDecoder", "", 20) @@ -514,11 +514,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsInf", "", 20) @@ -534,11 +534,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsNaN", "", 20) @@ -555,12 +555,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMax", "", 20) @@ -577,12 +577,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMin", "", 20) @@ -598,11 +598,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("RegexFullMatch", "", 20) @@ -618,12 +618,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("StringConcat", "", 20) @@ -640,12 +640,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo op_type = OpType("StringSplit", "", 20) @@ -733,15 +733,23 
@@ def affine_grid( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int64)` """ - return _AffineGrid( - _AffineGrid.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - ), - _AffineGrid.Inputs( - theta=theta, - size=size, - ), - ).outputs.grid + input_prop_values = create_prop_dict( + theta=theta, + size=size, + ) + return ( + _AffineGrid( + _AffineGrid.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + ), + _AffineGrid.Inputs( + theta=unwrap_vars(theta), + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .grid + ) def constant_of_shape( @@ -781,14 +789,21 @@ def constant_of_shape( - T1: `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), + _ConstantOfShape.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def dft( @@ -881,17 +896,26 @@ def dft( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _DFT( - _DFT.Attributes( - inverse=AttrInt64(inverse, name="inverse"), - onesided=AttrInt64(onesided, name="onesided"), - ), - _DFT.Inputs( - input=input, - dft_length=dft_length, - axis=axis, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + dft_length=dft_length, + axis=axis, + ) + return ( + _DFT( + _DFT.Attributes( + inverse=AttrInt64(inverse, name="inverse"), + onesided=AttrInt64(onesided, name="onesided"), + ), + _DFT.Inputs( + input=unwrap_vars(input), + dft_length=unwrap_vars(dft_length), + axis=unwrap_vars(axis), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def gelu( @@ -932,14 +956,21 @@ def gelu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Gelu( - _Gelu.Attributes( - approximate=AttrString(approximate, name="approximate"), - ), - _Gelu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _Gelu( + _Gelu.Attributes( + approximate=AttrString(approximate, name="approximate"), + ), + _Gelu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def grid_sample( @@ -1044,17 +1075,25 @@ def grid_sample( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GridSample( - _GridSample.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - mode=AttrString(mode, name="mode"), - padding_mode=AttrString(padding_mode, name="padding_mode"), - ), - 
_GridSample.Inputs( - X=X, - grid=grid, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + grid=grid, + ) + return ( + _GridSample( + _GridSample.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + mode=AttrString(mode, name="mode"), + padding_mode=AttrString(padding_mode, name="padding_mode"), + ), + _GridSample.Inputs( + X=unwrap_vars(X), + grid=unwrap_vars(grid), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def image_decoder( @@ -1115,14 +1154,21 @@ def image_decoder( - T1: `tensor(uint8)` - T2: `tensor(uint8)` """ - return _ImageDecoder( - _ImageDecoder.Attributes( - pixel_format=AttrString(pixel_format, name="pixel_format"), - ), - _ImageDecoder.Inputs( - encoded_stream=encoded_stream, - ), - ).outputs.image + input_prop_values = create_prop_dict( + encoded_stream=encoded_stream, + ) + return ( + _ImageDecoder( + _ImageDecoder.Attributes( + pixel_format=AttrString(pixel_format, name="pixel_format"), + ), + _ImageDecoder.Inputs( + encoded_stream=unwrap_vars(encoded_stream), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .image + ) def isinf( @@ -1164,15 +1210,22 @@ def isinf( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` - T2: `tensor(bool)` """ - return _IsInf( - _IsInf.Attributes( - detect_negative=AttrInt64(detect_negative, name="detect_negative"), - detect_positive=AttrInt64(detect_positive, name="detect_positive"), - ), - _IsInf.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _IsInf( + _IsInf.Attributes( + detect_negative=AttrInt64(detect_negative, name="detect_negative"), + detect_positive=AttrInt64(detect_positive, name="detect_positive"), + ), + _IsInf.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def isnan( @@ -1201,12 +1254,19 @@ def isnan( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` - T2: `tensor(bool)` """ - return _IsNaN( - _IsNaN.Attributes(), - _IsNaN.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _IsNaN( + _IsNaN.Attributes(), + _IsNaN.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def reduce_max( @@ -1267,18 +1327,26 @@ def reduce_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMax( - _ReduceMax.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceMax( + _ReduceMax.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceMax.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceMax.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def reduce_min( @@ -1338,18 +1406,26 @@ def reduce_min( Type constraints: - T: `tensor(bfloat16)`, 
`tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMin( - _ReduceMin.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _ReduceMin( + _ReduceMin.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceMin.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceMin.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) def regex_full_match( @@ -1388,14 +1464,21 @@ def regex_full_match( - T1: `tensor(string)` - T2: `tensor(bool)` """ - return _RegexFullMatch( - _RegexFullMatch.Attributes( - pattern=AttrString.maybe(pattern, name="pattern"), - ), - _RegexFullMatch.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _RegexFullMatch( + _RegexFullMatch.Attributes( + pattern=AttrString.maybe(pattern, name="pattern"), + ), + _RegexFullMatch.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def string_concat( @@ -1428,13 +1511,21 @@ def string_concat( Type constraints: - T: `tensor(string)` """ - return _StringConcat( - _StringConcat.Attributes(), - _StringConcat.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + return ( + _StringConcat( + _StringConcat.Attributes(), + _StringConcat.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) def string_split( @@ -1506,15 +1597,22 @@ def string_split( - T2: `tensor(string)` - T3: `tensor(int64)` """ - return _StringSplit( - _StringSplit.Attributes( - delimiter=AttrString.maybe(delimiter, name="delimiter"), - maxsplit=AttrInt64.maybe(maxsplit, name="maxsplit"), - ), - _StringSplit.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + ) + return ( + _StringSplit( + _StringSplit.Attributes( + delimiter=AttrString.maybe(delimiter, name="delimiter"), + maxsplit=AttrInt64.maybe(maxsplit, name="maxsplit"), + ), + _StringSplit.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v21.py b/src/spox/opset/ai/onnx/v21.py index f4f027cc..2f2e2b4b 100644 --- a/src/spox/opset/ai/onnx/v21.py +++ b/src/spox/opset/ai/onnx/v21.py @@ -29,8 +29,8 @@ from spox._node import OpType from spox._standard import StandardNode from spox._type_system import Tensor, Type -from spox._value_prop import PropValueType -from spox._var import Var +from spox._value_prop import PropDict, PropValueType +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v20 import ( _DFT, _GRU, @@ -385,11 +385,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cast", "", 21) @@ -405,12 +405,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - 
target_type: Var + input: _VarInfo + target_type: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("CastLike", "", 21) @@ -434,9 +434,9 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: ((key, raw),) = ( (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None ) @@ -476,11 +476,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ConstantOfShape", "", 21) @@ -497,13 +497,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("DequantizeLinear", "", 21) @@ -519,11 +519,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Flatten", "", 21) @@ -541,13 +541,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - scale: Var - bias: Var + X: _VarInfo + scale: _VarInfo + bias: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GroupNormalization", "", 21) @@ -563,11 +563,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Identity", "", 21) @@ -584,11 +584,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - cond: Var + cond: _VarInfo @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("If", "", 21) @@ -604,13 +604,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - M: Optional[Var] - cond: Optional[Var] - v_initial: Sequence[Var] + M: Optional[_VarInfo] + cond: Optional[_VarInfo] + v_initial: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - v_final_and_scan_outputs: Sequence[Var] + v_final_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Loop", "", 21) @@ -626,14 +626,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] - axes: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 21) @@ -649,18 +649,18 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - a: Var - a_scale: Var - a_zero_point: Var - b: Var - b_scale: Var - b_zero_point: Var - y_scale: Var - y_zero_point: Var + a: _VarInfo + a_scale: _VarInfo + a_zero_point: _VarInfo + b: _VarInfo + b_scale: _VarInfo + b_zero_point: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QLinearMatMul", "", 21) @@ -679,13 +679,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - y_scale: Var - y_zero_point: Optional[Var] + x: _VarInfo + y_scale: _VarInfo + y_zero_point: 
Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QuantizeLinear", "", 21) @@ -701,12 +701,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - shape: Var + data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - reshaped: Var + reshaped: _VarInfo op_type = OpType("Reshape", "", 21) @@ -727,11 +727,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - initial_state_and_scan_inputs: Sequence[Var] + initial_state_and_scan_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - final_state_and_scan_outputs: Sequence[Var] + final_state_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Scan", "", 21) @@ -748,11 +748,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - shape: Var + shape: _VarInfo op_type = OpType("Shape", "", 21) @@ -768,11 +768,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - size: Var + size: _VarInfo op_type = OpType("Size", "", 21) @@ -788,12 +788,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - squeezed: Var + squeezed: _VarInfo op_type = OpType("Squeeze", "", 21) @@ -809,11 +809,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - transposed: Var + transposed: _VarInfo op_type = OpType("Transpose", "", 21) @@ -829,12 +829,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Var + data: _VarInfo + axes: _VarInfo @dataclass class Outputs(BaseOutputs): - expanded: Var + expanded: _VarInfo op_type = OpType("Unsqueeze", "", 21) @@ -964,15 +964,22 @@ def cast( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Cast( - _Cast.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Cast( + _Cast.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + to=AttrDtype(to, name="to"), + ), + _Cast.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def cast_like( @@ -1018,15 +1025,23 @@ def cast_like( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, 
`tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _CastLike( - _CastLike.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - ), - _CastLike.Inputs( - input=input, - target_type=target_type, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + target_type=target_type, + ) + return ( + _CastLike( + _CastLike.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + ), + _CastLike.Inputs( + input=unwrap_vars(input), + target_type=unwrap_vars(target_type), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def constant( @@ -1084,18 +1099,23 @@ def constant( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + return ( + _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), + _Constant.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def constant_of_shape( @@ -1135,14 +1155,21 @@ def constant_of_shape( - T1: `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), + 
_ConstantOfShape.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def dequantize_linear( @@ -1213,17 +1240,26 @@ def dequantize_linear( - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` """ - return _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - block_size=AttrInt64(block_size, name="block_size"), - ), - _DequantizeLinear.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + ) + return ( + _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + block_size=AttrInt64(block_size, name="block_size"), + ), + _DequantizeLinear.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def flatten( @@ -1265,14 +1301,21 @@ def flatten( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Flatten( - _Flatten.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Flatten.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Flatten( + _Flatten.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Flatten.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def group_normalization( @@ -1354,18 +1397,27 @@ def group_normalization( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GroupNormalization( - _GroupNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - num_groups=AttrInt64(num_groups, name="num_groups"), - stash_type=AttrInt64(stash_type, name="stash_type"), - ), - _GroupNormalization.Inputs( - X=X, - scale=scale, - bias=bias, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + scale=scale, + bias=bias, + ) + return ( + _GroupNormalization( + _GroupNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + num_groups=AttrInt64(num_groups, name="num_groups"), + stash_type=AttrInt64(stash_type, name="stash_type"), + ), + _GroupNormalization.Inputs( + X=unwrap_vars(X), + scale=unwrap_vars(scale), + bias=unwrap_vars(bias), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) def identity( @@ -1393,12 +1445,19 @@ def identity( Type constraints: - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, 
`optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + return ( + _Identity( + _Identity.Attributes(), + _Identity.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def if_( @@ -1455,16 +1514,23 @@ def if_( """ _else_branch_subgraph: Graph = subgraph((), else_branch) _then_branch_subgraph: Graph = subgraph((), then_branch) - return _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=cond, - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ).outputs.outputs + input_prop_values = create_prop_dict( + cond=cond, + ) + return ( + _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), + _If.Inputs( + cond=unwrap_vars(cond), + ), + out_variadic=len(_else_branch_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) def loop( @@ -1648,17 +1714,26 @@ def loop( + [var.unwrap_type() for var in v_initial], body, ) - return _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=M, - cond=cond, - v_initial=v_initial, - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ).outputs.v_final_and_scan_outputs + input_prop_values = create_prop_dict( + M=M, + cond=cond, + v_initial=v_initial, + ) + return ( + _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _Loop.Inputs( + M=unwrap_vars(M), + cond=unwrap_vars(cond), + v_initial=unwrap_vars(v_initial), + ), + out_variadic=len(_body_subgraph.requested_results) - 1, + ) + .get_output_vars(input_prop_values=input_prop_values) + .v_final_and_scan_outputs + ) def pad( @@ -1821,17 +1896,27 @@ def pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, 
`tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - axes=axes, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + axes=axes, + ) + return ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) def qlinear_matmul( @@ -1907,19 +1992,33 @@ def qlinear_matmul( - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` - T3: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` """ - return _QLinearMatMul( - _QLinearMatMul.Attributes(), - _QLinearMatMul.Inputs( - a=a, - a_scale=a_scale, - a_zero_point=a_zero_point, - b=b, - b_scale=b_scale, - b_zero_point=b_zero_point, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + a=a, + a_scale=a_scale, + a_zero_point=a_zero_point, + b=b, + b_scale=b_scale, + b_zero_point=b_zero_point, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + return ( + _QLinearMatMul( + _QLinearMatMul.Attributes(), + _QLinearMatMul.Inputs( + a=unwrap_vars(a), + a_scale=unwrap_vars(a_scale), + a_zero_point=unwrap_vars(a_zero_point), + b=unwrap_vars(b), + b_scale=unwrap_vars(b_scale), + b_zero_point=unwrap_vars(b_zero_point), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def quantize_linear( @@ -2029,19 +2128,28 @@ def quantize_linear( - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` """ - return _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - block_size=AttrInt64(block_size, name="block_size"), - output_dtype=AttrInt64(output_dtype, name="output_dtype"), - saturate=AttrInt64(saturate, name="saturate"), - ), - _QuantizeLinear.Inputs( - x=x, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + return ( + _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + block_size=AttrInt64(block_size, name="block_size"), + output_dtype=AttrInt64(output_dtype, name="output_dtype"), + saturate=AttrInt64(saturate, name="saturate"), + ), + _QuantizeLinear.Inputs( + x=unwrap_vars(x), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) def reshape( @@ -2095,15 +2203,23 @@ def reshape( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, 
`tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=data, - shape=shape, - ), - ).outputs.reshaped + input_prop_values = create_prop_dict( + data=data, + shape=shape, + ) + return ( + _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), + _Reshape.Inputs( + data=unwrap_vars(data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reshaped + ) def scan( @@ -2327,26 +2443,37 @@ def scan( ], body, ) - return _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" + input_prop_values = create_prop_dict( + initial_state_and_scan_inputs=initial_state_and_scan_inputs, + ) + return ( + _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe( + scan_input_axes, name="scan_input_axes" + ), + scan_input_directions=AttrInt64s.maybe( + scan_input_directions, name="scan_input_directions" + ), + scan_output_axes=AttrInt64s.maybe( + scan_output_axes, name="scan_output_axes" + ), + scan_output_directions=AttrInt64s.maybe( + scan_output_directions, name="scan_output_directions" + ), ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" + _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars( + initial_state_and_scan_inputs + ), ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=initial_state_and_scan_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.final_state_and_scan_outputs + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .final_state_and_scan_outputs + ) def shape( @@ -2425,15 +2552,22 @@ def shape( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=data, - ), - ).outputs.shape + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), + _Shape.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .shape + ) def size( @@ -2463,12 +2597,19 @@ def size( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, 
`tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Size( - _Size.Attributes(), - _Size.Inputs( - data=data, - ), - ).outputs.size + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Size( + _Size.Attributes(), + _Size.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .size + ) def squeeze( @@ -2506,13 +2647,21 @@ def squeeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Squeeze( - _Squeeze.Attributes(), - _Squeeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.squeezed + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _Squeeze( + _Squeeze.Attributes(), + _Squeeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .squeezed + ) def transpose( @@ -2549,14 +2698,21 @@ def transpose( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Transpose( - _Transpose.Attributes( - perm=AttrInt64s.maybe(perm, name="perm"), - ), - _Transpose.Inputs( - data=data, - ), - ).outputs.transposed + input_prop_values = create_prop_dict( + data=data, + ) + return ( + _Transpose( + _Transpose.Attributes( + perm=AttrInt64s.maybe(perm, name="perm"), + ), + _Transpose.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .transposed + ) def unsqueeze( @@ -2604,13 +2760,21 @@ def unsqueeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Unsqueeze( - _Unsqueeze.Attributes(), - _Unsqueeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.expanded + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + return ( + _Unsqueeze( + _Unsqueeze.Attributes(), + _Unsqueeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .expanded + ) def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/tests/test_adapt.py b/tests/test_adapt.py index 
3b0884ad..2a6a2450 100644 --- a/tests/test_adapt.py +++ b/tests/test_adapt.py @@ -15,9 +15,11 @@ from spox import Tensor, Var, argument, build, inline from spox._attributes import AttrInt64s from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._future import initializer from spox._graph import arguments, results from spox._node import OpType from spox._standard import StandardNode +from spox._var import _VarInfo @pytest.fixture @@ -82,11 +84,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - squeezed: Var + squeezed: _VarInfo op_type = OpType("Squeeze", "", 11) @@ -96,9 +98,14 @@ class Outputs(BaseOutputs): def squeeze11(_data: Var, _axes: Iterable[int]): - return Squeeze11( - Squeeze11.Attributes(AttrInt64s(_axes, "axes")), Squeeze11.Inputs(_data) - ).outputs.squeezed + return ( + Squeeze11( + Squeeze11.Attributes(AttrInt64s(_axes, "axes")), + Squeeze11.Inputs(_data._var_info), + ) + .get_output_vars() + .squeezed + ) @pytest.fixture @@ -157,6 +164,18 @@ def test_adapt_node_with_repeating_input_names(): build({"a": a}, {"b": b, "c": c}) +def test_adapt_node_initializer(): + init_data = [1.0, 2.0, 3.0] + + a = argument(Tensor(np.float32, ("N",))) + b = initializer(init_data, np.float32) + c = op18.equal(a, b) + d = op19.identity(a) + + model = build({"a": a}, {"b": b, "c": c, "d": d}) + np.testing.assert_allclose(model.graph.initializer[0].float_data, init_data) + + def test_inline_model_custom_node_only(): """Inline a model which only consists of a custom node. diff --git a/tests/test_constructors.py b/tests/test_constructors.py index 5dc3c641..55f0de1f 100644 --- a/tests/test_constructors.py +++ b/tests/test_constructors.py @@ -34,7 +34,10 @@ def test_variadic_no_input_list_mutation(onnx_helper): ins = [a, b] concat = op.concat(ins, axis=0) ins[1] = b - assert list(concat._op.inputs) == [a, b] + assert list(concat._op.inputs.get_var_infos().values()) == [ + a._var_info, + b._var_info, + ] def test_variadic_no_attr_mutation_array(onnx_helper): diff --git a/tests/test_custom_operator.py b/tests/test_custom_operator.py index 1c3c195c..66b369d0 100644 --- a/tests/test_custom_operator.py +++ b/tests/test_custom_operator.py @@ -19,6 +19,7 @@ from spox._graph import arguments, results from spox._node import Node, OpType from spox._type_system import Tensor, Type +from spox._var import _VarInfo # Define the Node for this operator - need to know the attributes, inputs and outputs statically @@ -32,18 +33,18 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo # This is optional, but is useful when defining the inference functions below. attrs: Attributes inputs: Inputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values) -> dict[str, Type]: # This is technically optional, but using an operator without type inference may be inconvenient. if self.inputs.X.type is None: return {} @@ -54,19 +55,23 @@ def infer_output_types(self) -> dict[str, Type]: ) return {"Y": t} - def propagate_values(self) -> dict[str, np.ndarray]: + def propagate_values(self, initializers) -> dict[str, np.ndarray]: # This is optional and implements value propagation ('partial data propagation' in ONNX). # In essence constant folding carried through for purposes of type inference. 
return ( - {"Y": np.linalg.inv(self.inputs.X._get_value())} - if self.inputs.X._value is not None + {"Y": np.linalg.inv(initializers["X"].value)} + if initializers["X"] is not None else {} ) # Define the operator constructor which is actually used def inverse(matrix: Var) -> Var: - return Inverse(Inverse.Attributes(), Inverse.Inputs(matrix)).outputs.Y + return ( + Inverse(Inverse.Attributes(), Inverse.Inputs(matrix._var_info)) + .get_output_vars(input_prop_values={"X": matrix._value}) + .Y + ) # Test the correct runtime behaviour with ORT diff --git a/tests/test_function.py b/tests/test_function.py index fd03d1b1..a02568cd 100644 --- a/tests/test_function.py +++ b/tests/test_function.py @@ -14,12 +14,12 @@ import spox.opset.ai.onnx.v17 as op from spox._attributes import Attr, AttrFloat32, _Ref -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs, BaseVars from spox._function import Function, to_function from spox._graph import arguments, results from spox._node import OpType from spox._type_system import Tensor -from spox._var import Var +from spox._var import Var, _VarInfo @pytest.fixture @@ -32,11 +32,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LinearFunction", "spox.test", 0) @@ -44,7 +44,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: + def constructor(self, attrs: dict[str, Attr], inputs: BaseVars) -> Outputs: # FIXME: At some point, attribute references should be properly type-hinted. a = op.constant( value_float=_Ref( @@ -57,18 +57,22 @@ def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: ) # type: ignore ) x = inputs.X - return self.Outputs(op.add(op.mul(a, x), b)) + return self.Outputs(op.add(op.mul(a, x), b)._var_info) def linear_inner( x: Var, a: Union[float, _Ref[float]], b: Union[float, _Ref[float]] ) -> Var: - return LinearFunction( - LinearFunction.Attributes( - slope_outer=AttrFloat32(a, "slope_outer"), - shift_outer=AttrFloat32(b, "shift_outer"), - ), - LinearFunction.Inputs(x), - ).outputs.Y + return ( + LinearFunction( + LinearFunction.Attributes( + slope_outer=AttrFloat32(a, "slope_outer"), + shift_outer=AttrFloat32(b, "shift_outer"), + ), + LinearFunction.Inputs(x._var_info), + ) + .get_output_vars(input_prop_values={"x": x._value}) + .Y + ) return linear_inner @@ -83,11 +87,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LinearFunction2", "spox.test", 0) @@ -95,24 +99,29 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: + def constructor(self, attrs: dict[str, Attr], inputs: BaseVars) -> Outputs: return self.Outputs( linear( inputs.X, _Ref(attrs["slope1"], outer_name="slope1", name="slope_outer"), _Ref(attrs["shift1"], outer_name="shift1", name="shift_outer"), - ) + )._var_info ) def linear_inner( x: Var, a: Union[float, _Ref[float]], b: Union[float, _Ref[float]] ) -> Var: - return LinearFunction2( - LinearFunction2.Attributes( - slope1=AttrFloat32(a, name="slope1"), shift1=AttrFloat32(b, "shift1") - ), - LinearFunction2.Inputs(x), - ).outputs.Y + return ( + LinearFunction2( + LinearFunction2.Attributes( + slope1=AttrFloat32(a, 
name="slope1"), + shift1=AttrFloat32(b, "shift1"), + ), + LinearFunction2.Inputs(x._var_info), + ) + .get_output_vars({"X": x._value}) + .Y + ) return linear_inner @@ -129,11 +138,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("CubicFunction", "spox.test.extra", 0) @@ -141,8 +150,8 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: - x = inputs.X + def constructor(self, attrs: dict[str, Attr], inputs: BaseVars) -> Outputs: + x = inputs["X"] a = op.mul( linear( x, @@ -165,18 +174,22 @@ def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: ), ) y = op.add(a, b) - return self.Outputs(y) + return self.Outputs(y._var_info) def cubic_inner(x: Var, a3: float, a2: float, a1: float, a0: float) -> Var: - return CubicFunction( - CubicFunction.Attributes( - a3=AttrFloat32(a3, name="a3"), - a2=AttrFloat32(a2, name="a2"), - a1=AttrFloat32(a1, name="a1"), - a0=AttrFloat32(a0, name="a0"), - ), - CubicFunction.Inputs(X=x), - ).outputs.Y + return ( + CubicFunction( + CubicFunction.Attributes( + a3=AttrFloat32(a3, name="a3"), + a2=AttrFloat32(a2, name="a2"), + a1=AttrFloat32(a1, name="a1"), + a0=AttrFloat32(a0, name="a0"), + ), + CubicFunction.Inputs(X=x._var_info), + ) + .get_output_vars() + .Y + ) return cubic_inner diff --git a/tests/test_value_propagation.py b/tests/test_value_propagation.py index c26d4de1..9a7d3389 100644 --- a/tests/test_value_propagation.py +++ b/tests/test_value_propagation.py @@ -13,6 +13,7 @@ from spox._graph import arguments, results from spox._shape import Shape from spox._value_prop import ORTValue, PropValue +from spox._var import _VarInfo @pytest.fixture( @@ -27,7 +28,7 @@ def value_prop_backend(request): def dummy_var(typ=None, value=None): """Function for creating a ``var`` without an operator but with a type and value.""" - return Var(None, typ, value) # type: ignore + return Var(_VarInfo(None, typ), value) # type: ignore def assert_equal_value(var: Var, expected: ORTValue): @@ -132,6 +133,13 @@ def test_sequence_append(): ) +def test_variadict_max(): + a = op.const([2, 1, 4]) + b = op.const(3) + c = op.const([0]) + assert_equal_value(op.max([a, b, c]), [3, 3, 4]) + + def test_with_reconstruct(): a, b = arguments( a=_type_system.Tensor(np.int64, ()), diff --git a/tools/templates/class.jinja2 b/tools/templates/class.jinja2 index b2553675..d3367d2b 100644 --- a/tools/templates/class.jinja2 +++ b/tools/templates/class.jinja2 @@ -14,11 +14,11 @@ class _{{ schema.name }}(StandardNode): {% for input in schema.inputs %} {{ input.name }}: {% if is_optional(input) - %}Optional[Var]{% + %}Optional[_VarInfo]{% elif is_variadic(input) - %}Sequence[Var]{% + %}Sequence[_VarInfo]{% else - %}Var{% + %}_VarInfo{% endif %} {% endfor %} @@ -33,11 +33,11 @@ class _{{ schema.name }}(StandardNode): {% for output in schema.outputs %} {{ output.name }}: {% if is_optional(output) - %}Optional[Var]{% + %}Optional[_VarInfo]{% elif is_variadic(output) - %}Sequence[Var]{% + %}Sequence[_VarInfo]{% else - %}Var{% + %}_VarInfo{% endif %} {% endfor %} @@ -47,14 +47,14 @@ class _{{ schema.name }}(StandardNode): {% endif %} {% if type_inference %} - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: {% filter indent(width=8) %} {%+ include type_inference %} {% endfilter %} {% endif %} {% if 
value_propagation %} - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: {% filter indent(width=8) %} {%+ include value_propagation %} {% endfilter %} diff --git a/tools/templates/construct.jinja2 b/tools/templates/construct.jinja2 index 53f76989..1d1cddd4 100644 --- a/tools/templates/construct.jinja2 +++ b/tools/templates/construct.jinja2 @@ -14,6 +14,11 @@ _{{ attr.name }}_subgraph: Graph = subgraph( ) {% endif %} {% endfor %} +input_prop_values = create_prop_dict( +{% for param in schema.inputs + %}{{param.name}}={{param.name}}, {% +endfor %} + ) return _{{ schema.name }}( _{{ schema.name }}.Attributes( {% for attr in attributes %} @@ -31,12 +36,13 @@ return _{{ schema.name }}( {% endfor %} ), _{{ schema.name }}.Inputs( {% for param in schema.inputs - %}{{param.name}}={{param.name}}, {% + %}{{param.name}}=unwrap_vars({{param.name}}), {% endfor %} ), {% if schema.outputs and is_variadic(schema.outputs[-1]) %}out_variadic={{ out_variadic_solution if out_variadic_solution else "{}_count".format(schema.outputs[-1].name) }}, {% -endif %}).outputs{% +endif %} + ).get_output_vars(input_prop_values=input_prop_values){% if schema.outputs | length <= 1 %}.{{ schema.outputs[0].name }}{% else %}._unpack_to_any(){% diff --git a/tools/templates/preamble.jinja2 b/tools/templates/preamble.jinja2 index e4e320f3..b0dd5d63 100644 --- a/tools/templates/preamble.jinja2 +++ b/tools/templates/preamble.jinja2 @@ -2,12 +2,11 @@ import typing import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( Any, Callable, - Iterable, Optional, - Sequence, Union, ) from typing import cast as typing_cast @@ -15,7 +14,7 @@ from typing import cast as typing_cast import numpy as np import numpy.typing as npt -from spox._var import Var, result_type +from spox._var import Var, _VarInfo, result_type, unwrap_vars, get_value, create_prop_dict from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( AttrDtype, @@ -34,4 +33,4 @@ from spox._internal_op import intro from spox._node import OpType from spox._standard import InferenceError, StandardNode from spox._type_system import Tensor, Type, Sequence as SpoxSequence -from spox._value_prop import PropValueType +from spox._value_prop import PropValueType, PropDict diff --git a/tools/templates/type_inference/compress11.jinja2 b/tools/templates/type_inference/compress11.jinja2 index 4fe26383..a2f9b24f 100644 --- a/tools/templates/type_inference/compress11.jinja2 +++ b/tools/templates/type_inference/compress11.jinja2 @@ -1,4 +1,4 @@ -self.infer_output_types_onnx() +self.infer_output_types_onnx(input_prop_values) inp, cond = self.inputs.input.unwrap_tensor(), self.inputs.condition.unwrap_tensor() if not inp.shape: return {'output': Tensor(inp.dtype, None)} @@ -14,4 +14,4 @@ if self.attrs.axis is not None: shape[axis] = None else: shape = [None] -return {'output': Tensor(inp.dtype, tuple(shape))} \ No newline at end of file +return {'output': Tensor(inp.dtype, tuple(shape))} diff --git a/tools/templates/type_inference/loop16-fix.jinja2 b/tools/templates/type_inference/loop16-fix.jinja2 index 775e9d57..b797693c 100644 --- a/tools/templates/type_inference/loop16-fix.jinja2 +++ b/tools/templates/type_inference/loop16-fix.jinja2 @@ -1,9 +1,9 @@ -output_types = super().infer_output_types() +output_types = super().infer_output_types({}) body = self.attrs.body.value n = len(body.requested_arguments) - 2 
-carried_names = list(self.outputs.get_vars())[:n] +carried_names = list(self.outputs.get_var_infos())[:n] carried_types = [v.type for v in list(body.requested_results.values())[1:][:n]] for name, typ in zip(carried_names, carried_types):