From 02bdf61b882a5d466326c622ee3def7560d35a1c Mon Sep 17 00:00:00 2001
From: Antonio Kim
Date: Tue, 9 Apr 2024 08:49:55 -0700
Subject: [PATCH] Replace c10::optional with std::optional

---
 build_tools/autogen_ltc_backend.py            |  12 +-
 build_tools/ci/build_posix.sh                 |   1 +
 .../csrc/base_lazy_backend/backend_impl.cpp   |   2 +-
 .../ltc/csrc/base_lazy_backend/backend_impl.h |   4 +-
 .../ltc/csrc/base_lazy_backend/ir_builder.h   |   2 +-
 .../mlir_native_functions.cpp                 |  46 +++----
 .../base_lazy_backend/mlir_node_lowering.cpp  |   2 +-
 .../ltc/csrc/base_lazy_backend/ops/to_copy.h  |  20 +--
 .../base_lazy_backend/shape_inference.cpp     | 122 +++++++++---------
 .../base_lazy_backend/utils/tensor_utils.cpp  |   2 +-
 .../base_lazy_backend/utils/tensor_utils.h    |   2 +-
 11 files changed, 108 insertions(+), 107 deletions(-)

diff --git a/build_tools/autogen_ltc_backend.py b/build_tools/autogen_ltc_backend.py
index 40a64c1c1c2b8..30967d28fdbd7 100644
--- a/build_tools/autogen_ltc_backend.py
+++ b/build_tools/autogen_ltc_backend.py
@@ -123,12 +123,12 @@ def __init__(self, binary_dir):
         self.generated_path.mkdir(parents=True, exist_ok=True)
 
         # Create symlink to match doc structure
-        generated_path = self.backend_path.joinpath("generated").resolve()
-        if not generated_path.exists():
-            generated_path.symlink_to(
-                os.path.relpath(self.generated_path, generated_path.parent),
-                target_is_directory=True,
-            )
+        generated_path = self.backend_path.joinpath("generated")
+        generated_path.unlink(missing_ok=True)
+        generated_path.symlink_to(
+            os.path.relpath(self.generated_path, generated_path.parent),
+            target_is_directory=True,
+        )
 
         self.tensor_class = "torch::lazy::LazyTensor"
 
diff --git a/build_tools/ci/build_posix.sh b/build_tools/ci/build_posix.sh
index bacb736ba1f21..fec5e252e8d7e 100755
--- a/build_tools/ci/build_posix.sh
+++ b/build_tools/ci/build_posix.sh
@@ -50,6 +50,7 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \
   -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$repo_root" \
   -DLLVM_TARGETS_TO_BUILD=host \
   -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
+  -DTORCH_MLIR_ENABLE_LTC=ON
 echo "::endgroup::"
 
 echo "::group::Build"
diff --git a/projects/ltc/csrc/base_lazy_backend/backend_impl.cpp b/projects/ltc/csrc/base_lazy_backend/backend_impl.cpp
index dc044879669e1..56f6ff00c0a7a 100644
--- a/projects/ltc/csrc/base_lazy_backend/backend_impl.cpp
+++ b/projects/ltc/csrc/base_lazy_backend/backend_impl.cpp
@@ -117,7 +117,7 @@ TorchMlirBackendImpl::GetComputationDataFromNode(const Node *node) const {
 
 at::Tensor TorchMlirBackendImpl::MakeTensorFromComputationData(
     const BackendDataPtr data,
-    c10::optional<at::ScalarType> logical_scalar_type) const {
+    std::optional<at::ScalarType> logical_scalar_type) const {
   PRINT_FUNCTION();
 
   TorchMlirBackendData *torch_mlir_data =
diff --git a/projects/ltc/csrc/base_lazy_backend/backend_impl.h b/projects/ltc/csrc/base_lazy_backend/backend_impl.h
index 4029cab1ea905..67ff8b656d88a 100644
--- a/projects/ltc/csrc/base_lazy_backend/backend_impl.h
+++ b/projects/ltc/csrc/base_lazy_backend/backend_impl.h
@@ -30,7 +30,7 @@ class TORCH_API TorchMlirBackendData : public BackendData {
 public:
   struct Info : public BackendData::Info {
     at::Tensor tensor;
-    c10::optional<at::Scalar> scalar;
+    std::optional<at::Scalar> scalar;
     bool requires_grad;
     std::string name;
 
@@ -111,7 +111,7 @@ class TORCH_API TorchMlirBackendImpl : public BackendImplInterface {
 
   virtual at::Tensor MakeTensorFromComputationData(
       const BackendDataPtr data,
-      c10::optional<at::ScalarType> logical_scalar_type) const override;
+      std::optional<at::ScalarType> logical_scalar_type) const override;
 
   /**
    * Lowering, Compilation, Execution
diff --git a/projects/ltc/csrc/base_lazy_backend/ir_builder.h b/projects/ltc/csrc/base_lazy_backend/ir_builder.h
index 8e2179553808f..e2ed622950541 100644
--- a/projects/ltc/csrc/base_lazy_backend/ir_builder.h
+++ b/projects/ltc/csrc/base_lazy_backend/ir_builder.h
@@ -34,7 +34,7 @@ struct TorchMlirIrBuilder : IrBuilder {
   NodePtr MakeDeviceData(const std::shared_ptr<BackendData>& data) const override { return MakeNode<DeviceData>(data); }
   NodePtr MakeScalar(const at::Scalar& value, const at::ScalarType& type) const override { return MakeNode<Scalar>(value, type); }
   NodePtr MakeExpand(const Value& input0, const std::vector<int64_t>& size, const bool& is_scalar_expand) const override { return MakeNode<Expand>(input0, size, is_scalar_expand); }
-  NodePtr MakeCast(const Value& input0, const at::ScalarType& dtype, const c10::optional<at::ScalarType>& stype = c10::nullopt) const override { return MakeNode<Cast>(input0, dtype, stype); }
+  NodePtr MakeCast(const Value& input0, const at::ScalarType& dtype, const std::optional<at::ScalarType>& stype = c10::nullopt) const override { return MakeNode<Cast>(input0, dtype, stype); }
   NodePtr MakeTensorList(const OpList& inputs) const override { return MakeNode<TorchMlirTensorList>(inputs); }
   NodePtr MakeGeneric(const OpKind& op, const OpList& operands, const Shape& shape, const size_t& num_outputs = 1, const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) const override { return MakeNode<Generic>(op, operands, shape, num_outputs, hash_seed); }
diff --git a/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp b/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp
index af680f224095d..ea544e68b5800 100644
--- a/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp
+++ b/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp
@@ -60,7 +60,7 @@ at::Tensor to_meta(const at::Tensor &tensor) {
   return out;
 }
 
-c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor> &tensor) {
+std::optional<at::Tensor> to_meta(const std::optional<at::Tensor> &tensor) {
   if (tensor.has_value()) {
     return to_meta(*tensor);
   }
@@ -76,9 +76,9 @@ c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor> &tensor) {
   return outs;
 }
 
-c10::List<c10::optional<at::Tensor>>
-to_meta(const c10::List<c10::optional<at::Tensor>> &t_list) {
-  c10::List<c10::optional<at::Tensor>> outs;
+c10::List<std::optional<at::Tensor>>
+to_meta(const c10::List<std::optional<at::Tensor>> &t_list) {
+  c10::List<std::optional<at::Tensor>> outs;
   outs.reserve(t_list.size());
   for (const auto &tensor : t_list) {
     outs.push_back(to_meta(tensor));
@@ -94,7 +94,7 @@ namespace {
 
 [[maybe_unused]] at::Tensor
 CreateLtcTensor(const at::Tensor &tensor,
-                const c10::optional<torch::lazy::BackendDevice> &device) {
+                const std::optional<torch::lazy::BackendDevice> &device) {
   if (tensor.defined() && device) {
     return torch::lazy::CreateAtenFromLtcTensor(
         torch::lazy::LazyTensor::Create(tensor, *device));
@@ -102,8 +102,8 @@ CreateLtcTensor(const at::Tensor &tensor,
   return tensor;
 }
 
-[[maybe_unused]] c10::optional<torch::lazy::BackendDevice>
-GetLtcDevice(const c10::optional<c10::Device> &device) {
+[[maybe_unused]] std::optional<torch::lazy::BackendDevice>
+GetLtcDevice(const std::optional<c10::Device> &device) {
   if (!device) {
     return c10::nullopt;
   }
@@ -148,7 +148,7 @@ void copy_(torch::lazy::LazyTensorPtr &input, torch::lazy::LazyTensorPtr &src) {
 
 // This should be safe to do, because every operator in the LT is functional.
 at::Tensor LazyNativeFunctions::clone(const at::Tensor &self,
-                                      c10::optional<at::MemoryFormat> memory_format) {
+                                      std::optional<at::MemoryFormat> memory_format) {
   auto self_lt = torch::lazy::TryGetLtcTensor(self);
   return torch::lazy::CreateAtenFromLtcTensor(
       self_lt->Create(self_lt->GetIrValue(), self_lt->GetDevice()));
 }
@@ -234,10 +234,10 @@ at::Tensor LazyNativeFunctions::_copy_from_and_resize(const at::Tensor &self,
 }
 
 at::Tensor LazyNativeFunctions::_to_copy(
-    const at::Tensor &self, c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout, c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory, bool non_blocking,
-    c10::optional<at::MemoryFormat> memory_format) {
+    const at::Tensor &self, std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout, std::optional<at::Device> device,
+    std::optional<bool> pin_memory, bool non_blocking,
+    std::optional<at::MemoryFormat> memory_format) {
   PRINT_FUNCTION();
   auto options = self.options();
   if (dtype) {
@@ -482,7 +482,7 @@ LazyNativeFunctions::split_copy_symint(const at::Tensor &self,
 
 at::Tensor LazyNativeFunctions::index(
     const at::Tensor &self,
-    const c10::List<c10::optional<at::Tensor>> &indices) {
+    const c10::List<std::optional<at::Tensor>> &indices) {
   TORCH_LAZY_FN_COUNTER("lazy::");
   auto common_device = torch::lazy::GetBackendDevice(self);
   TORCH_INTERNAL_ASSERT(common_device);
@@ -491,7 +491,7 @@ at::Tensor LazyNativeFunctions::index(
 
   std::vector<torch::lazy::Value> values;
   for (const auto &it : indices) {
-    c10::optional<at::Tensor> tensor = it;
+    std::optional<at::Tensor> tensor = it;
     LazyTensorPtr lazy_tensor =
         torch::lazy::TryGetLtcTensor(tensor.value_or(at::Tensor()));
     values.push_back(
@@ -532,7 +532,7 @@ at::Tensor LazyNativeFunctions::index(
 }
 
 at::Tensor LazyNativeFunctions::index_put(
-    const at::Tensor &self, const c10::List<c10::optional<at::Tensor>> &indices,
+    const at::Tensor &self, const c10::List<std::optional<at::Tensor>> &indices,
     const at::Tensor &values, bool accumulate) {
   TORCH_LAZY_FN_COUNTER("lazy::");
   auto common_device = torch::lazy::GetBackendDevice(self);
@@ -544,7 +544,7 @@ at::Tensor LazyNativeFunctions::index_put(
 
   std::vector<torch::lazy::Value> indices_vector;
   for (const auto &it : indices) {
-    c10::optional<at::Tensor> tensor = it;
+    std::optional<at::Tensor> tensor = it;
     LazyTensorPtr lazy_tensor =
         torch::lazy::TryGetLtcTensor(tensor.value_or(at::Tensor()));
     indices_vector.push_back(
@@ -616,9 +616,9 @@ at::Tensor LazyNativeFunctions::block_diag(at::TensorList tensors) {
 }
 at::Tensor LazyNativeFunctions::new_empty_strided_symint(
     const at::Tensor &self, c10::SymIntArrayRef size,
-    c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout, c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory) {
+    c10::SymIntArrayRef stride, std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout, std::optional<at::Device> device,
+    std::optional<bool> pin_memory) {
   if (!device || device->type() == c10::DeviceType::Lazy) {
     return at::functionalization::functionalize_aten_op_symint<ATEN_OP(
         new_empty_strided)>::call(self, size, stride, dtype, layout, device,
@@ -628,8 +628,8 @@ at::Tensor LazyNativeFunctions::new_empty_strided_symint(
   // lazy_tensor.new_empty_strided(..., "cpu") we need to avoid explicit
   // functionalization. To do that we create regular cpu tensors.
   at::Tensor t = at::empty_symint(
-      size, (dtype ? dtype : c10::optional<at::ScalarType>(self.scalar_type())),
-      (layout ? layout : c10::optional<at::Layout>(self.layout())), device,
+      size, (dtype ? dtype : std::optional<at::ScalarType>(self.scalar_type())),
+      (layout ? layout : std::optional<at::Layout>(self.layout())), device,
       pin_memory, c10::nullopt);
   return t.as_strided_symint(size, stride, /*storage_offset=*/0);
 }
@@ -679,8 +679,8 @@ at::Tensor LazyNativeFunctions::_trilinear(
                                  unroll_dim);
 }
 at::Tensor LazyNativeFunctions::linalg_pinv(
-    const at::Tensor &self, const c10::optional<at::Tensor> &atol,
-    const c10::optional<at::Tensor> &rtol, bool hermitian) {
+    const at::Tensor &self, const std::optional<at::Tensor> &atol,
+    const std::optional<at::Tensor> &rtol, bool hermitian) {
   return at::functionalization::functionalize_aten_op<ATEN_OP2(
       linalg_pinv, atol_rtol_tensor)>::call(self, atol, rtol, hermitian);
 }
diff --git a/projects/ltc/csrc/base_lazy_backend/mlir_node_lowering.cpp b/projects/ltc/csrc/base_lazy_backend/mlir_node_lowering.cpp
index b52b724f0f16a..96deb9aeb9ba7 100644
--- a/projects/ltc/csrc/base_lazy_backend/mlir_node_lowering.cpp
+++ b/projects/ltc/csrc/base_lazy_backend/mlir_node_lowering.cpp
@@ -159,7 +159,7 @@ c10::TensorType &cast_tensor_type(c10::TypePtr value_type) {
   return *tensor_type.get();
 }
 
-c10::optional<std::vector<int64_t>>
+std::optional<std::vector<int64_t>>
 get_tensor_type_shape(c10::TensorType &tensor_type) {
   auto &symbolic_shape = tensor_type.symbolic_sizes();
   if (!symbolic_shape.rank()) {
diff --git a/projects/ltc/csrc/base_lazy_backend/ops/to_copy.h b/projects/ltc/csrc/base_lazy_backend/ops/to_copy.h
index 4023550314748..f0ffdb0b134a0 100644
--- a/projects/ltc/csrc/base_lazy_backend/ops/to_copy.h
+++ b/projects/ltc/csrc/base_lazy_backend/ops/to_copy.h
@@ -25,11 +25,11 @@ namespace lazy {
 class ToCopy : public torch::lazy::TorchMlirNode {
 public:
   ToCopy(const torch::lazy::Value &self,
-         const c10::optional<at::ScalarType> &dtype,
-         const c10::optional<at::Layout> &layout,
-         const c10::optional<at::Device> &device,
-         const c10::optional<bool> &pin_memory, const bool &non_blocking,
-         const c10::optional<at::MemoryFormat> &memory_format,
+         const std::optional<at::ScalarType> &dtype,
+         const std::optional<at::Layout> &layout,
+         const std::optional<at::Device> &device,
+         const std::optional<bool> &pin_memory, const bool &non_blocking,
+         const std::optional<at::MemoryFormat> &memory_format,
          std::vector<torch::lazy::Shape> &&shapes)
       : torch::lazy::TorchMlirNode(
             torch::lazy::OpKind(at::aten::_to_copy), {self}, std::move(shapes),
@@ -95,12 +95,12 @@ class ToCopy : public torch::lazy::TorchMlirNode {
     return _to_copy_out;
   }
 
-  c10::optional<at::ScalarType> dtype;
-  c10::optional<at::Layout> layout;
-  c10::optional<at::Device> device;
-  c10::optional<bool> pin_memory;
+  std::optional<at::ScalarType> dtype;
+  std::optional<at::Layout> layout;
+  std::optional<at::Device> device;
+  std::optional<bool> pin_memory;
   bool non_blocking;
-  c10::optional<at::MemoryFormat> memory_format;
+  std::optional<at::MemoryFormat> memory_format;
 };
 } // namespace lazy
 } // namespace torch
diff --git a/projects/ltc/csrc/base_lazy_backend/shape_inference.cpp b/projects/ltc/csrc/base_lazy_backend/shape_inference.cpp
index 8e3b2c0702d38..e262d1bd8dae3 100644
--- a/projects/ltc/csrc/base_lazy_backend/shape_inference.cpp
+++ b/projects/ltc/csrc/base_lazy_backend/shape_inference.cpp
@@ -149,15 +149,15 @@ std::vector<torch::lazy::Shape> compute_shape_mul(const at::Tensor &self,
 
 std::vector<torch::lazy::Shape>
 compute_shape_var(const at::Tensor &self, at::OptionalIntArrayRef dim,
-                  const c10::optional<at::Scalar> &correction, bool keepdim) {
+                  const ::std::optional<at::Scalar> &correction, bool keepdim) {
   // Result of variance is scalar tensor.
   return {Shape(self.scalar_type(), {})};
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_nan_to_num(const at::Tensor &self, c10::optional<double> nan,
-                         c10::optional<double> posinf,
-                         c10::optional<double> neginf) {
+compute_shape_nan_to_num(const at::Tensor &self, ::std::optional<double> nan,
+                         ::std::optional<double> posinf,
+                         ::std::optional<double> neginf) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
@@ -226,8 +226,8 @@ std::vector<torch::lazy::Shape> compute_shape_fmod(const at::Tensor &self,
 }
 
 std::vector<torch::lazy::Shape> compute_shape_native_group_norm(
-    const at::Tensor &input, const c10::optional<at::Tensor> &weight,
-    const c10::optional<at::Tensor> &bias, int64_t N, int64_t C, int64_t HxW,
+    const at::Tensor &input, const ::std::optional<at::Tensor> &weight,
+    const ::std::optional<at::Tensor> &bias, int64_t N, int64_t C, int64_t HxW,
     int64_t group, double eps) {
 
   TORCH_CHECK(input.sizes().size() >= 2,
              "Input tensor must have at least batch and channel dimensions!");
@@ -263,8 +263,9 @@ compute_shape_im2col(const at::Tensor &self, at::IntArrayRef kernel_size,
 
 std::vector<torch::lazy::Shape> compute_shape_native_group_norm_backward(
     const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean,
-    const at::Tensor &rstd, const c10::optional<at::Tensor> &weight, int64_t N,
-    int64_t C, int64_t HxW, int64_t group, ::std::array<bool, 3> output_mask) {
+    const at::Tensor &rstd, const ::std::optional<at::Tensor> &weight,
+    int64_t N, int64_t C, int64_t HxW, int64_t group,
+    ::std::array<bool, 3> output_mask) {
 
   TORCH_CHECK(input.sizes().size() >= 2,
              "Input tensor must have at least batch and channel dimensions!");
@@ -317,20 +318,20 @@ compute_shape_reflection_pad2d(const at::Tensor &self,
 
 std::vector<torch::lazy::Shape>
 compute_shape_uniform(const at::Tensor &self, double from, double to,
-                      c10::optional<at::Generator> generator) {
+                      ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<torch::lazy::Shape>
 compute_shape_normal_functional(const at::Tensor &self, double mean, double std,
-                                c10::optional<at::Generator> generator) {
+                                ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<torch::lazy::Shape>
 compute_shape_multinomial(const at::Tensor &self, int64_t num_samples,
                           bool replacement,
-                          c10::optional<at::Generator> generator) {
+                          ::std::optional<at::Generator> generator) {
   // Input tensor can be either 1D or 2D. The last dim of output
   // should be 'num_samples'. So the output shape can be either
   // [num_samples] or [m, num_samples].
@@ -341,30 +342,29 @@ compute_shape_multinomial(const at::Tensor &self, int64_t num_samples,
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_eye(int64_t n, c10::optional<at::ScalarType> dtype,
-                  c10::optional<at::Layout> layout,
-                  c10::optional<at::Device> device,
-                  c10::optional<bool> pin_memory) {
+compute_shape_eye(int64_t n, ::std::optional<at::ScalarType> dtype,
+                  ::std::optional<at::Layout> layout,
+                  ::std::optional<at::Device> device,
+                  ::std::optional<bool> pin_memory) {
   auto out_meta =
       at::eye(n, dtype, layout, c10::Device(c10::kMeta), pin_memory);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_eye(int64_t n, int64_t m, c10::optional<at::ScalarType> dtype,
-                  c10::optional<at::Layout> layout,
-                  c10::optional<at::Device> device,
-                  c10::optional<bool> pin_memory) {
+compute_shape_eye(int64_t n, int64_t m, ::std::optional<at::ScalarType> dtype,
+                  ::std::optional<at::Layout> layout,
+                  ::std::optional<at::Device> device,
+                  ::std::optional<bool> pin_memory) {
   auto out_meta =
       at::eye(n, m, dtype, layout, c10::Device(c10::kMeta), pin_memory);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
 }
 
-std::vector<torch::lazy::Shape>
-compute_shape_arange(const at::Scalar &end, c10::optional<at::ScalarType> dtype,
-                     c10::optional<at::Layout> layout,
-                     c10::optional<at::Device> device,
-                     c10::optional<bool> pin_memory) {
+std::vector<torch::lazy::Shape> compute_shape_arange(
+    const at::Scalar &end, ::std::optional<at::ScalarType> dtype,
+    ::std::optional<at::Layout> layout, ::std::optional<at::Device> device,
+    ::std::optional<bool> pin_memory) {
   auto out_meta =
       at::arange(end, dtype, layout, c10::Device(c10::kMeta), pin_memory);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
@@ -372,8 +372,8 @@ compute_shape_arange(const at::Scalar &end, c10::optional<at::ScalarType> dtype,
 
 std::vector<torch::lazy::Shape> compute_shape_arange(
     const at::Scalar &start, const at::Scalar &end,
-    c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
   auto out_meta = at::arange(start, end, dtype, layout,
                              c10::Device(c10::kMeta), pin_memory);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
@@ -381,8 +381,8 @@ std::vector<torch::lazy::Shape> compute_shape_arange(
 
 std::vector<torch::lazy::Shape> compute_shape_arange(
     const at::Scalar &start, const at::Scalar &end, const at::Scalar &step,
-    c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
   auto out_meta = at::arange(start, end, step, dtype, layout,
                              c10::Device(c10::kMeta), pin_memory);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
@@ -390,44 +390,44 @@ std::vector<torch::lazy::Shape> compute_shape_arange(
 
 std::vector<torch::lazy::Shape> compute_shape_full(
     at::IntArrayRef size, const at::Scalar &fill_value,
-    c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
   return {
       Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
-                   c10::optional<at::Layout> layout,
-                   c10::optional<at::Device> device,
-                   c10::optional<bool> pin_memory) {
+compute_shape_ones(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype,
+                   ::std::optional<at::Layout> layout,
+                   ::std::optional<at::Device> device,
+                   ::std::optional<bool> pin_memory) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
-                    c10::optional<at::Layout> layout,
-                    c10::optional<at::Device> device,
-                    c10::optional<bool> pin_memory) {
+compute_shape_zeros(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype,
+                    ::std::optional<at::Layout> layout,
+                    ::std::optional<at::Device> device,
+                    ::std::optional<bool> pin_memory) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
-                    c10::optional<at::Layout> layout,
-                    c10::optional<at::Device> device,
-                    c10::optional<bool> pin_memory,
-                    c10::optional<at::MemoryFormat> memory_format) {
+compute_shape_empty(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype,
+                    ::std::optional<at::Layout> layout,
+                    ::std::optional<at::Device> device,
+                    ::std::optional<bool> pin_memory,
+                    ::std::optional<at::MemoryFormat> memory_format) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape> compute_shape_empty_strided(
     at::IntArrayRef size, at::IntArrayRef stride,
-    c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
@@ -443,46 +443,46 @@ std::vector<torch::lazy::Shape> compute_shape_fill(const at::Tensor &self,
 }
 
 std::vector<torch::lazy::Shape>
-compute_shape_randn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
-                    c10::optional<at::Layout> layout,
-                    c10::optional<at::Device> device,
-                    c10::optional<bool> pin_memory) {
+compute_shape_randn(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype,
+                    ::std::optional<at::Layout> layout,
+                    ::std::optional<at::Device> device,
+                    ::std::optional<bool> pin_memory) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape> compute_shape_randint(
-    int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout, c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory) {
+    int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype,
+    ::std::optional<at::Layout> layout, ::std::optional<at::Device> device,
+    ::std::optional<bool> pin_memory) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape> compute_shape_randint(
     int64_t low, int64_t high, at::IntArrayRef size,
-    c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
   return {
      Shape(dtype.value_or(at::get_default_dtype_as_scalartype()), size.vec())};
 }
 
 std::vector<torch::lazy::Shape>
 compute_shape_resize(const at::Tensor &self, at::IntArrayRef size,
-                     c10::optional<at::MemoryFormat> memory_format) {
+                     ::std::optional<at::MemoryFormat> memory_format) {
   return {Shape(self.scalar_type(), size.vec())};
 }
 
 std::vector<torch::lazy::Shape>
 compute_shape_bernoulli(const at::Tensor &self, const at::Tensor &p,
-                        c10::optional<at::Generator> generator) {
+                        ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<torch::lazy::Shape> compute_shape_scalar_tensor(
-    const at::Scalar &s, c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout, c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory) {
+    const at::Scalar &s, ::std::optional<at::ScalarType> dtype,
+    ::std::optional<at::Layout> layout, ::std::optional<at::Device> device,
+    ::std::optional<bool> pin_memory) {
   return {Shape(dtype.value_or(s.type()), c10::ArrayRef<int64_t>{})};
 }
 
@@ -494,8 +494,8 @@ std::vector<torch::lazy::Shape> compute_shape_roll(const at::Tensor &self,
 
 std::vector<torch::lazy::Shape> compute_shape_linspace(
     const at::Scalar &start, const at::Scalar &end, int64_t steps,
-    c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
   auto out_meta = at::linspace(start, end, steps, dtype, layout,
                                c10::Device(c10::kMeta), pin_memory);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
diff --git a/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.cpp b/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.cpp
index 71a0e89f4c645..5a25deb7655ca 100644
--- a/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.cpp
+++ b/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.cpp
@@ -75,7 +75,7 @@ torch::lazy::DeviceData *device_data_cast(const torch::lazy::Value &value) {
 
 torch::lazy::DeviceData *
 device_data_cast(const at::Tensor &tensor,
-                 c10::optional<torch::lazy::BackendDevice> device) {
+                 std::optional<torch::lazy::BackendDevice> device) {
   if (!device) {
     device = torch::lazy::GetBackendDevice(tensor);
   }
diff --git a/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.h b/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.h
index f8e5e317294a5..dca5f7fcd086f 100644
--- a/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.h
+++ b/projects/ltc/csrc/base_lazy_backend/utils/tensor_utils.h
@@ -22,7 +22,7 @@ TORCH_API torch::lazy::DeviceData *
 device_data_cast(const torch::lazy::Value &value);
 TORCH_API torch::lazy::DeviceData *device_data_cast(
     const at::Tensor &tensor,
-    c10::optional<torch::lazy::BackendDevice> device = c10::nullopt);
+    std::optional<torch::lazy::BackendDevice> device = c10::nullopt);
 
 } // namespace lazy
 } // namespace torch
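
Note (not part of the patch): the rename above is mechanical because recent libtorch defines c10::optional as an alias of std::optional, so the two spellings name the same type and c10::nullopt still converts cleanly — which is also why the patch can keep `= c10::nullopt` defaults on std::optional parameters. A minimal sketch, assuming a libtorch where that alias is in place; the helper scale_or_default and its values are made up for illustration:

    // scale_or_default.cpp -- hypothetical example, not from the torch-mlir tree.
    // Assumes a recent libtorch where c10::optional is an alias of std::optional.
    #include <optional>
    #include <c10/util/Optional.h> // brings in the c10::optional / c10::nullopt spellings

    // Signature uses the std:: spelling, exactly as the patch rewrites them.
    std::optional<double> scale_or_default(std::optional<double> scale) {
      if (!scale.has_value())
        return 1.0; // fall back to a default when no value was passed
      return *scale;
    }

    int main() {
      // Callers that still use the c10 spellings keep working, because
      // c10::nullopt and c10::optional<double> resolve to the std types.
      auto a = scale_or_default(c10::nullopt);
      auto b = scale_or_default(c10::optional<double>(2.5));
      return (a.value() == 1.0 && b.value() == 2.5) ? 0 : 1;
    }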