From 97aaa50f3a23e4b5569dd11edb99003cd69716d0 Mon Sep 17 00:00:00 2001 From: chenyu Date: Wed, 11 Dec 2024 22:59:35 -0500 Subject: [PATCH] remove duplicated UOp in Tensor init types [pr] (#8177) and a small comment --- tinygrad/engine/realize.py | 3 +-- tinygrad/tensor.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tinygrad/engine/realize.py b/tinygrad/engine/realize.py index fa7d02be851d5..8ba3451467ad6 100644 --- a/tinygrad/engine/realize.py +++ b/tinygrad/engine/realize.py @@ -12,8 +12,7 @@ logkerns, logkerns_level = open(getenv("LOGKERNS", ""), "a") if getenv("LOGKERNS", "") else None, getenv("LOGKERNS_LEVEL", 1) def get_kernel(renderer:Renderer, ast:UOp) -> Kernel: - if DEBUG >= 5: - print(ast) + if DEBUG >= 5: print(ast) k = Kernel(ast, opts=renderer).required_optimizations() if not NOOPT: if not (used_tensor_cores:=k.apply_tensor_cores(getenv("TC", 1))): k.hand_coded_optimizations() diff --git a/tinygrad/tensor.py b/tinygrad/tensor.py index ba5eef5e2fb99..22f1fa68b7f14 100644 --- a/tinygrad/tensor.py +++ b/tinygrad/tensor.py @@ -122,7 +122,7 @@ class Tensor(SimpleMathTrait): training: ClassVar[bool] = False no_grad: ClassVar[bool] = False - def __init__(self, data:Union[None, ConstType, UOp, bytes, List, Tuple, UOp, MultiLazyBuffer, 'np.ndarray', pathlib.Path], # type: ignore [name-defined] # noqa: F821 + def __init__(self, data:Union[None, ConstType, bytes, List, Tuple, UOp, MultiLazyBuffer, 'np.ndarray', pathlib.Path], # type: ignore [name-defined] # noqa: F821 device:Optional[Union[str, tuple, list]]=None, dtype:Optional[DTypeLike]=None, requires_grad:Optional[bool]=None): if dtype is not None: dtype = to_dtype(dtype) assert dtype is None or isinstance(dtype, DType), f"invalid dtype {dtype}" @@ -150,7 +150,7 @@ def __init__(self, data:Union[None, ConstType, UOp, bytes, List, Tuple, UOp, Mul elif isinstance(data, (list, tuple)): if dtype is None: if (d := fully_flatten(data)) and all(isinstance(s, bool) for s in d): dtype = 
dtypes.bool - else: dtype = dtypes.default_int if d and all_int(d) else dtypes.default_float + else: dtype = dtypes.default_int if d and all_int(d) else dtypes.default_float # NOTE: this works because all_int([True, False]) is True if dtype == dtypes.bfloat16: data = Tensor(_frompy(data, dtypes.float32), device=device).cast(dtypes.bfloat16).lazydata else: data = _frompy(data, dtype) elif str(type(data)) == "<class 'numpy.ndarray'>":