From 8bcfe77c1db86baf5b8b63e3482d8404cfd99b08 Mon Sep 17 00:00:00 2001
From: cyy
Date: Wed, 17 Jul 2024 13:39:54 +0800
Subject: [PATCH] Use std::string_view

---
 torch_xla/csrc/aten_xla_type.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/torch_xla/csrc/aten_xla_type.cpp b/torch_xla/csrc/aten_xla_type.cpp
index 9f93f399837b..2a37fe4893b1 100644
--- a/torch_xla/csrc/aten_xla_type.cpp
+++ b/torch_xla/csrc/aten_xla_type.cpp
@@ -1376,7 +1376,7 @@ at::Tensor XLANativeFunctions::div(const at::Tensor& self,
 
 at::Tensor XLANativeFunctions::div(
     const at::Tensor& self, const at::Tensor& other,
-    std::optional<c10::string_view> rounding_mode) {
+    std::optional<std::string_view> rounding_mode) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   at::ScalarType dtype = at::result_type(self, other);
   auto operands = GetBinaryOperands(self, UnwrapNumber(other, dtype));
@@ -1414,7 +1414,7 @@ at::Tensor XLANativeFunctions::dot(const at::Tensor& self,
       bridge::GetXlaTensor(self), bridge::GetXlaTensor(tensor)));
 }
 
-at::Tensor XLANativeFunctions::einsum(c10::string_view equation,
+at::Tensor XLANativeFunctions::einsum(std::string_view equation,
                                       at::TensorList tensors,
                                       at::OptionalIntArrayRef path) {
   std::string cleansed_equation = std::string(equation);
@@ -1709,7 +1709,7 @@ at::Tensor XLANativeFunctions::gather(const at::Tensor& self, int64_t dim,
 }
 
 at::Tensor XLANativeFunctions::gelu(const at::Tensor& self,
-                                    c10::string_view approximate) {
+                                    std::string_view approximate) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   return bridge::AtenFromXlaTensor(
       tensor_methods::gelu(bridge::GetXlaTensor(self), approximate));
@@ -1717,7 +1717,7 @@ at::Tensor XLANativeFunctions::gelu(const at::Tensor& self,
 
 at::Tensor XLANativeFunctions::gelu_backward(const at::Tensor& grad,
                                              const at::Tensor& self,
-                                             c10::string_view approximate) {
+                                             std::string_view approximate) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   at::ScalarType result_type = at::result_type(grad, self);
   return bridge::AtenFromXlaTensor(tensor_methods::gelu_backward(
@@ -3074,7 +3074,7 @@ at::Tensor XLANativeFunctions::rsub(const at::Tensor& self,
 
 at::Tensor scatter_reduce_helper(const at::Tensor& self, int64_t dim,
                                  const at::Tensor& index, const at::Tensor& src,
-                                 std::optional<c10::string_view> reduce) {
+                                 std::optional<std::string_view> reduce) {
   XLATensorPtr self_tensor = bridge::GetXlaTensor(self);
   if (!reduce.has_value()) {
     return bridge::AtenFromXlaTensor(
@@ -3095,7 +3095,7 @@ at::Tensor scatter_reduce_helper(const at::Tensor& self, int64_t dim,
 
 at::Tensor scatter_reduce_helper(const at::Tensor& self, int64_t dim,
                                  const at::Tensor& index, const at::Scalar& value,
-                                 std::optional<c10::string_view> reduce) {
+                                 std::optional<std::string_view> reduce) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   XLATensorPtr self_tensor = bridge::GetXlaTensor(self);
   if (!reduce.has_value()) {
@@ -3129,7 +3129,7 @@ at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
 at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
                                        const at::Tensor& index,
                                        const at::Tensor& src,
-                                       c10::string_view reduce) {
+                                       std::string_view reduce) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   return scatter_reduce_helper(self, dim, index, src, reduce);
 }
@@ -3137,7 +3137,7 @@ at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
 at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
                                        const at::Tensor& index,
                                        const at::Scalar& value,
-                                       c10::string_view reduce) {
+                                       std::string_view reduce) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   return scatter_reduce_helper(self, dim, index, value, reduce);
 }
@@ -3153,7 +3153,7 @@ at::Tensor XLANativeFunctions::scatter_add(const at::Tensor& self, int64_t dim,
 // supported
 at::Tensor XLANativeFunctions::scatter_reduce(
     const at::Tensor& self, int64_t dim, const at::Tensor& index,
-    const at::Tensor& src, c10::string_view reduce, bool include_self) {
+    const at::Tensor& src, std::string_view reduce, bool include_self) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   if ((reduce == "sum" || reduce == "prod" || reduce == "amin" ||
        reduce == "amax") &&
@@ -3782,7 +3782,7 @@ at::Tensor& XLANativeFunctions::zero_(at::Tensor& self) {
 
 std::tuple<at::Tensor, at::Tensor, at::Tensor> XLANativeFunctions::_linalg_svd(
     const at::Tensor& self, bool full_matrices, bool compute_uv,
-    std::optional<c10::string_view> /* driver */) {
+    std::optional<std::string_view> /* driver */) {
   // The optional driver string is only for CUDA with a cuSOLVER backend.
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   // As per https://pytorch.org/docs/stable/generated/torch.svd.html,
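
Note on the change (illustrative, not part of the patch): replacing c10::string_view with
std::string_view in these signatures is expected to be source-compatible for the call sites
touched above, since std::string_view supports the same literal comparisons (e.g.
reduce == "sum") and the explicit conversion to std::string used in einsum. A minimal
standalone sketch with a hypothetical helper name, not taken from aten_xla_type.cpp:

#include <iostream>
#include <string>
#include <string_view>

// Hypothetical stand-in for the reduce-mode check in scatter_reduce: taking
// std::string_view keeps both string-literal and std::string callers working,
// and the equality comparisons behave the same as before the type swap.
bool is_supported_reduce(std::string_view reduce) {
  return reduce == "sum" || reduce == "prod" || reduce == "amin" ||
         reduce == "amax";
}

int main() {
  std::string mode = "prod";
  std::cout << is_supported_reduce("sum") << '\n';   // string literal -> 1
  std::cout << is_supported_reduce(mode) << '\n';    // std::string -> 1
  std::cout << is_supported_reduce("mean") << '\n';  // unsupported mode -> 0

  // einsum-style copy into an owning string also works with std::string_view.
  std::string_view equation = "ij,jk->ik";
  std::string cleansed_equation = std::string(equation);
  std::cout << cleansed_equation << '\n';
  return 0;
}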