Use std::optional (#7228)
cyyever committed Jun 10, 2024
1 parent 291764f commit 2c71e9b
Showing 55 changed files with 487 additions and 487 deletions.
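
The change itself is mechanical: every c10::optional / c10::nullopt in these tests becomes std::optional / std::nullopt, with no other edits to the call sites. A minimal sketch of the pattern, assuming a PyTorch build where c10::optional is an alias for std::optional (ClampMinOnly is a hypothetical helper used only for illustration, not part of this commit):

#include <optional>
#include <torch/torch.h>

// Hypothetical helper: clamp from below only, leaving the upper bound unset.
// Only the namespace of the empty-optional sentinel changes in this commit;
// the torch::clamp call itself is untouched.
torch::Tensor ClampMinOnly(const torch::Tensor& a, const torch::Scalar& min_val) {
  // Before this commit the last argument was written as c10::nullopt.
  return torch::clamp(a, min_val, std::nullopt);
}
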
4 changes: 2 additions & 2 deletions test/cpp/cpp_test_util.cpp
@@ -379,11 +379,11 @@ void TestBackward(
 // Calculating higher order derivative requires create_graph=true
 bool create_graph = d != derivative_level;
 outs = torch::autograd::grad({sum}, inputs_w_grad, /*grad_outputs=*/{},
-                             /*retain_graph=*/c10::nullopt,
+                             /*retain_graph=*/std::nullopt,
                              /*create_graph=*/create_graph,
                              /*allow_unused=*/true);
 xouts = torch::autograd::grad({xsum}, xinputs_w_grad, /*grad_outputs=*/{},
-                              /*retain_graph=*/c10::nullopt,
+                              /*retain_graph=*/std::nullopt,
                               /*create_graph=*/create_graph,
                               /*allow_unused=*/true);
 for (size_t i = 0; i < outs.size(); ++i) {
14 changes: 7 additions & 7 deletions test/cpp/test_aten_xla_tensor_1.cpp
@@ -1080,10 +1080,10 @@ TEST_F(AtenXlaTensorTest, TestUpsampleNearest2DWithScale) {
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_input = CopyToDevice(input, device);
 torch::Tensor result = torch::upsample_nearest2d(
-    input, c10::nullopt,
+    input, std::nullopt,
     at::ArrayRef<double>{img_info.scale_h, img_info.scale_w});
 torch::Tensor xla_result = torch::upsample_nearest2d(
-    xla_input, c10::nullopt,
+    xla_input, std::nullopt,
     at::ArrayRef<double>{img_info.scale_h, img_info.scale_w});
 AllClose(result, xla_result);
 });
@@ -1116,7 +1116,7 @@ TEST_F(AtenXlaTensorTest, TestUpsampleNearest2DBackwardWithScale) {
 auto testfn =
     [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
   return torch::upsample_nearest2d(
-      inputs[0], c10::nullopt,
+      inputs[0], std::nullopt,
       at::ArrayRef<double>{img_info.scale_h, img_info.scale_w});
 };
 ForEachDevice([&](const torch::Device& device) {
@@ -1208,10 +1208,10 @@ TEST_F(AtenXlaTensorTest, TestUpsampleBilinear2DWithScale) {
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_input = CopyToDevice(input, device);
 torch::Tensor result = torch::upsample_bilinear2d(
-    input, c10::nullopt, align_corners,
+    input, std::nullopt, align_corners,
     at::ArrayRef<double>{img_info.scale_h, img_info.scale_w});
 torch::Tensor xla_result = torch::upsample_bilinear2d(
-    xla_input, c10::nullopt, align_corners,
+    xla_input, std::nullopt, align_corners,
     at::ArrayRef<double>{img_info.scale_h, img_info.scale_w});
 AllClose(result, xla_result, /*rtol=*/1e-4, /*atol=*/1e-4);
 });
@@ -1266,7 +1266,7 @@ TEST_F(AtenXlaTensorTest, TestUpsampleBilinear2DBackwardWithScale) {
 auto testfn =
     [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
   return torch::upsample_bilinear2d(
-      inputs[0], c10::nullopt, align_corners,
+      inputs[0], std::nullopt, align_corners,
       at::ArrayRef<double>{img_info.scale_h, img_info.scale_w});
 };
 ForEachDevice([&](const torch::Device& device) {
@@ -2389,7 +2389,7 @@ TEST_F(AtenXlaTensorTest, TestCount_Nonzero_with_single_dim) {
 a[0][1] = 1.0;
 a[0][2] = 1.0;
 a[2][2] = 1.0;
-std::vector<c10::optional<long int>> dims = {0, -1};
+std::vector<std::optional<long int>> dims = {0, -1};
 for (int i = 0; i < dims.size(); i++) {
 torch::Tensor b = torch::count_nonzero(a, dims[i]);
 ForEachDevice([&](const torch::Device& device) {
44 changes: 22 additions & 22 deletions test/cpp/test_aten_xla_tensor_2.cpp
@@ -1022,7 +1022,7 @@ TEST_F(AtenXlaTensorTest, TestStdInDim) {
 TEST_F(AtenXlaTensorTest, TestStdWithCorrection) {
 torch::Tensor a = torch::rand({4, 3, 4}, torch::TensorOptions(torch::kFloat));
 int rank = a.dim();
-c10::optional<torch::Scalar> corrections[] = {1, 2, 1.3, c10::nullopt};
+std::optional<torch::Scalar> corrections[] = {1, 2, 1.3, std::nullopt};
 for (const auto& correction : corrections) {
 for (auto keepdim : {true, false}) {
 for (const auto& dim :
@@ -1041,7 +1041,7 @@ TEST_F(AtenXlaTensorTest, TestStdWithCorrection) {
 TEST_F(AtenXlaTensorTest, TestStdMeanWithCorrection) {
 torch::Tensor a = torch::rand({4, 3, 4}, torch::TensorOptions(torch::kFloat));
 int rank = a.dim();
-c10::optional<torch::Scalar> corrections[] = {1, 2, 1.3, c10::nullopt};
+std::optional<torch::Scalar> corrections[] = {1, 2, 1.3, std::nullopt};
 for (const auto& correction : corrections) {
 for (auto keepdim : {true, false}) {
 for (const auto& dim :
@@ -1175,7 +1175,7 @@ TEST_F(AtenXlaTensorTest, TestVarWithDim) {

 TEST_F(AtenXlaTensorTest, TestVarWithCorrection) {
 torch::Tensor a = torch::rand({4, 3, 4}, torch::TensorOptions(torch::kFloat));
-c10::optional<torch::Scalar> corrections[] = {1, 2, 1.3, c10::nullopt};
+std::optional<torch::Scalar> corrections[] = {1, 2, 1.3, std::nullopt};
 for (const auto& dim : std::vector<std::vector<int64_t>>{{0, 1}, {-3, -2}}) {
 for (bool keepDim : {true, false}) {
 for (const auto& correction : corrections) {
@@ -1194,7 +1194,7 @@ TEST_F(AtenXlaTensorTest, TestVarWithCorrection) {

 TEST_F(AtenXlaTensorTest, TestVarMeanWithCorrection) {
 torch::Tensor a = torch::rand({4, 3, 4}, torch::TensorOptions(torch::kFloat));
-c10::optional<torch::Scalar> corrections[] = {1, 2, 1.3, c10::nullopt};
+std::optional<torch::Scalar> corrections[] = {1, 2, 1.3, std::nullopt};
 for (const auto& dim : std::vector<std::vector<int64_t>>{{0, 1}, {-3, -2}}) {
 for (const auto& correction : corrections) {
 for (auto keepdim : {true, false}) {
@@ -2076,10 +2076,10 @@ TEST_F(AtenXlaTensorTest, TestCumProdCastLong) {

 TEST_F(AtenXlaTensorTest, TestArgMin) {
 torch::Tensor a = torch::rand({4, 4, 4}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::argmin(a, c10::nullopt, /*keepdim=*/false);
+torch::Tensor b = torch::argmin(a, std::nullopt, /*keepdim=*/false);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::argmin(xla_a, c10::nullopt, /*keepdim=*/false);
+torch::Tensor xla_b = torch::argmin(xla_a, std::nullopt, /*keepdim=*/false);
 AllEqual(b, xla_b);
 });

@@ -2119,10 +2119,10 @@ TEST_F(AtenXlaTensorTest, TestArgMinDimKeep) {

 TEST_F(AtenXlaTensorTest, TestArgMinDimKeepNoDim) {
 torch::Tensor a = torch::rand({4, 4, 4}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::argmin(a, c10::nullopt, /*keepdim=*/true);
+torch::Tensor b = torch::argmin(a, std::nullopt, /*keepdim=*/true);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::argmin(xla_a, c10::nullopt, /*keepdim=*/true);
+torch::Tensor xla_b = torch::argmin(xla_a, std::nullopt, /*keepdim=*/true);
 AllEqual(b, xla_b);
 });

@@ -2160,10 +2160,10 @@ TEST_F(AtenXlaTensorTest, TestArgMinWrapper) {

 TEST_F(AtenXlaTensorTest, TestArgMax) {
 torch::Tensor a = torch::rand({4, 4, 4}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::argmax(a, c10::nullopt, /*keepdim=*/false);
+torch::Tensor b = torch::argmax(a, std::nullopt, /*keepdim=*/false);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::argmax(xla_a, c10::nullopt, /*keepdim=*/false);
+torch::Tensor xla_b = torch::argmax(xla_a, std::nullopt, /*keepdim=*/false);
 AllEqual(b, xla_b);
 });

@@ -2203,10 +2203,10 @@ TEST_F(AtenXlaTensorTest, TestArgMaxDimKeep) {

 TEST_F(AtenXlaTensorTest, TestArgMaxDimKeepNoDim) {
 torch::Tensor a = torch::rand({4, 4, 4}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::argmax(a, c10::nullopt, /*keepdim=*/true);
+torch::Tensor b = torch::argmax(a, std::nullopt, /*keepdim=*/true);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::argmax(xla_a, c10::nullopt, /*keepdim=*/true);
+torch::Tensor xla_b = torch::argmax(xla_a, std::nullopt, /*keepdim=*/true);
 AllEqual(b, xla_b);
 });

@@ -2216,10 +2216,10 @@ TEST_F(AtenXlaTensorTest, TestArgMaxDimKeepNoDim) {

 TEST_F(AtenXlaTensorTest, TestArgMaxSameValue) {
 torch::Tensor a = torch::ones({4, 4, 4}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::argmax(a, c10::nullopt, /*keepdim=*/false);
+torch::Tensor b = torch::argmax(a, std::nullopt, /*keepdim=*/false);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::argmax(xla_a, c10::nullopt, /*keepdim=*/false);
+torch::Tensor xla_b = torch::argmax(xla_a, std::nullopt, /*keepdim=*/false);
 AllEqual(b, xla_b);
 });

@@ -2511,10 +2511,10 @@ TEST_F(AtenXlaTensorTest, TestClampMinMaxTensor) {
 TEST_F(AtenXlaTensorTest, TestClampMin) {
 torch::Tensor a = torch::rand({2, 2}, torch::TensorOptions(torch::kFloat));
 torch::Scalar min_val(0.311);
-torch::Tensor b = torch::clamp(a, min_val, c10::nullopt);
+torch::Tensor b = torch::clamp(a, min_val, std::nullopt);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::clamp(xla_a, min_val, c10::nullopt);
+torch::Tensor xla_b = torch::clamp(xla_a, min_val, std::nullopt);
 AllClose(b, xla_b);
 });
 ExpectCounterNotChanged("aten::.*", cpp_test::GetIgnoredCounters());
@@ -2525,11 +2525,11 @@ TEST_F(AtenXlaTensorTest, TestClampMinTensor) {
 torch::Tensor a = torch::rand({2, 2}, torch::TensorOptions(torch::kFloat));
 torch::Tensor min_tensor =
     torch::rand({1, 2}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::clamp(a, min_tensor, c10::nullopt);
+torch::Tensor b = torch::clamp(a, min_tensor, std::nullopt);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
 torch::Tensor xla_min_tensor = CopyToDevice(min_tensor, device);
-torch::Tensor xla_b = torch::clamp(xla_a, xla_min_tensor, c10::nullopt);
+torch::Tensor xla_b = torch::clamp(xla_a, xla_min_tensor, std::nullopt);
 AllClose(b, xla_b);
 });
 ExpectCounterNotChanged("aten::.*", cpp_test::GetIgnoredCounters());
@@ -2539,10 +2539,10 @@ TEST_F(AtenXlaTensorTest, TestClampMinTensor) {
 TEST_F(AtenXlaTensorTest, TestClampMax) {
 torch::Tensor a = torch::rand({2, 2}, torch::TensorOptions(torch::kFloat));
 torch::Scalar max_val(0.409);
-torch::Tensor b = torch::clamp(a, c10::nullopt, max_val);
+torch::Tensor b = torch::clamp(a, std::nullopt, max_val);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
-torch::Tensor xla_b = torch::clamp(xla_a, c10::nullopt, max_val);
+torch::Tensor xla_b = torch::clamp(xla_a, std::nullopt, max_val);
 AllClose(b, xla_b);
 });
 ExpectCounterNotChanged("aten::.*", cpp_test::GetIgnoredCounters());
@@ -2553,11 +2553,11 @@ TEST_F(AtenXlaTensorTest, TestClampMaxTensor) {
 torch::Tensor a = torch::rand({2, 2}, torch::TensorOptions(torch::kFloat));
 torch::Tensor max_tensor =
     torch::rand({2, 1}, torch::TensorOptions(torch::kFloat));
-torch::Tensor b = torch::clamp(a, c10::nullopt, max_tensor);
+torch::Tensor b = torch::clamp(a, std::nullopt, max_tensor);
 ForEachDevice([&](const torch::Device& device) {
 torch::Tensor xla_a = CopyToDevice(a, device);
 torch::Tensor xla_max_tensor = CopyToDevice(max_tensor, device);
-torch::Tensor xla_b = torch::clamp(xla_a, c10::nullopt, xla_max_tensor);
+torch::Tensor xla_b = torch::clamp(xla_a, std::nullopt, xla_max_tensor);
 AllClose(b, xla_b);
 });
 ExpectCounterNotChanged("aten::.*", cpp_test::GetIgnoredCounters());
8 changes: 4 additions & 4 deletions test/cpp/test_aten_xla_tensor_4.cpp
@@ -391,8 +391,8 @@ TEST_F(AtenXlaTensorTest, TestDiv) {
 }

 TEST_F(AtenXlaTensorTest, TestDivWithRoundingMode) {
-c10::optional<c10::string_view> rounding_modes[] = {"trunc", "floor",
-                                                    c10::nullopt};
+std::optional<c10::string_view> rounding_modes[] = {"trunc", "floor",
+                                                    std::nullopt};
 for (const auto& rounding_mode : rounding_modes) {
 for (torch::ScalarType scalar_type1 :
     {torch::kFloat, torch::kByte, torch::kChar, torch::kShort, torch::kInt,
@@ -453,8 +453,8 @@ TEST_F(AtenXlaTensorTest, TestDivInPlace) {
 }

 TEST_F(AtenXlaTensorTest, TestDivInPlaceWithRoundingMode) {
-c10::optional<c10::string_view> rounding_modes[] = {"trunc", "floor",
-                                                    c10::nullopt};
+std::optional<c10::string_view> rounding_modes[] = {"trunc", "floor",
+                                                    std::nullopt};
 for (const auto& rounding_mode : rounding_modes) {
 for (torch::ScalarType scalar_type1 : {torch::kFloat}) {
 torch::Tensor a =
10 changes: 5 additions & 5 deletions test/cpp/test_lazy.cpp
@@ -24,7 +24,7 @@ TEST_F(LazyTest, TestXlaShapeToLazyWithF64) {
 torch::lazy::Shape lazy_shape = XlaHelpers::ConvertXlaShapeToLazy(xla_shape);
 std::vector<int64_t> lazy_dimensions =
     torch_xla::runtime::util::ToVector<int64_t>(lazy_shape.sizes());
-const c10::optional<std::vector<bool>>& lazy_dynamic_dimensions =
+const std::optional<std::vector<bool>>& lazy_dynamic_dimensions =
     lazy_shape.is_symbolic();
 EXPECT_EQ(lazy_shape.scalar_type(), at::ScalarType::Double);
 EXPECT_EQ(lazy_dimensions,
@@ -46,7 +46,7 @@ TEST_F(LazyTest, TestXlaShapeToLazyWithPred) {
 torch::lazy::Shape lazy_shape = XlaHelpers::ConvertXlaShapeToLazy(xla_shape);
 std::vector<int64_t> lazy_dimensions =
     torch_xla::runtime::util::ToVector<int64_t>(lazy_shape.sizes());
-const c10::optional<std::vector<bool>>& lazy_dynamic_dimensions =
+const std::optional<std::vector<bool>>& lazy_dynamic_dimensions =
     lazy_shape.is_symbolic();
 EXPECT_EQ(lazy_shape.scalar_type(), at::ScalarType::Bool);
 EXPECT_EQ(lazy_dimensions,
@@ -68,7 +68,7 @@ TEST_F(LazyTest, TestXlaShapeToLazyWithU64) {
 torch::lazy::Shape lazy_shape = XlaHelpers::ConvertXlaShapeToLazy(xla_shape);
 std::vector<int64_t> lazy_dimensions =
     torch_xla::runtime::util::ToVector<int64_t>(lazy_shape.sizes());
-const c10::optional<std::vector<bool>>& lazy_dynamic_dimensions =
+const std::optional<std::vector<bool>>& lazy_dynamic_dimensions =
     lazy_shape.is_symbolic();
 EXPECT_EQ(lazy_shape.scalar_type(), at::ScalarType::Long);
 EXPECT_EQ(lazy_dimensions,
@@ -90,7 +90,7 @@ TEST_F(LazyTest, TestXlaShapeToLazyWithMultipleDimensions) {
 torch::lazy::Shape lazy_shape = XlaHelpers::ConvertXlaShapeToLazy(xla_shape);
 std::vector<int64_t> lazy_dimensions =
     torch_xla::runtime::util::ToVector<int64_t>(lazy_shape.sizes());
-const c10::optional<std::vector<bool>>& lazy_dynamic_dimensions =
+const std::optional<std::vector<bool>>& lazy_dynamic_dimensions =
     lazy_shape.is_symbolic();
 EXPECT_EQ(lazy_shape.scalar_type(), at::ScalarType::Double);
 EXPECT_EQ(lazy_dimensions,
@@ -112,7 +112,7 @@ TEST_F(LazyTest, TestXlaShapeToLazyWithDynamicDimensions) {
 torch::lazy::Shape lazy_shape = XlaHelpers::ConvertXlaShapeToLazy(xla_shape);
 std::vector<int64_t> lazy_dimensions =
     torch_xla::runtime::util::ToVector<int64_t>(lazy_shape.sizes());
-const c10::optional<std::vector<bool>>& lazy_dynamic_dimensions =
+const std::optional<std::vector<bool>>& lazy_dynamic_dimensions =
     lazy_shape.is_symbolic();
 EXPECT_EQ(lazy_shape.scalar_type(), at::ScalarType::Double);
 EXPECT_EQ(lazy_dimensions,
2 changes: 1 addition & 1 deletion test/cpp/test_tensor.cpp
@@ -18,7 +18,7 @@ namespace {

 bool CheckBidirectionalConversion(
     const at::Tensor& input, at::ScalarType dest_element_type,
-    c10::optional<xla::PrimitiveType> xla_type = c10::nullopt) {
+    std::optional<xla::PrimitiveType> xla_type = std::nullopt) {
 xla::Literal literal =
     GetTensorLiteral(input, /*shape=*/nullptr, /*device=*/nullptr);
 if (xla_type) {
(Diffs for the remaining changed files are not shown.)
