#include <gtest/gtest.h>

#include <ATen/ATen.h>

#include <cmath>

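// Asserts that the `tensor` variable in the enclosing scope has the expected
// device type, device index, dtype, and layout.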
#define REQUIRE_TENSOR_OPTIONS(device_, index_, type_, layout_)            \
  ASSERT_TRUE(                                                             \
      tensor.device().type() == at::Device((device_), (index_)).type());   \
  ASSERT_TRUE(                                                             \
      tensor.device().index() == at::Device((device_), (index_)).index()); \
  ASSERT_EQ(tensor.dtype(), (type_));                                      \
  ASSERT_TRUE(tensor.layout() == (layout_))

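// Verifies that at::tensor places the data on the requested device. It uses
// device index 1, so at least two CUDA devices are required (which is what
// the _MultiCUDA suffix is understood to signal to the test harness).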
TEST(TensorTest, AllocatesTensorOnTheCorrectDevice_MultiCUDA) {
  auto tensor = at::tensor({1, 2, 3}, at::device({at::kCUDA, 1}));
  ASSERT_EQ(tensor.device().type(), at::Device::Type::CUDA);
  ASSERT_EQ(tensor.device().index(), 1);
}

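// Walks `tensor` through the Device- and TensorOptions-based overloads of
// to(): moves between CPU and both CUDA devices should change only the
// device, and a dtype-only TensorOptions should change only the dtype while
// keeping the current device.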
TEST(TensorTest, ToDevice_MultiCUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 0});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::Device(at::kCPU));
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::kCUDA);
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 1}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 0}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::kDouble));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kDouble, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 1}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kDouble, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::kInt));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kInt, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::Device(at::kCPU)));
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);

  tensor = tensor.to(at::TensorOptions(at::kCUDA));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kInt, at::kStrided);
}

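// Same idea, but the targets come from another tensor's attributes
// (dtype(), device(), device() + dtype(), options()) rather than being
// spelled out directly.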
TEST(TensorTest, ToTensorAndTensorAttributes_MultiCUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  auto other = at::empty({3, 4}, at::kFloat);
  tensor = tensor.to(other);
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  other = at::empty({3, 4}, at::TensorOptions(at::kCUDA).dtype(at::kDouble));
  tensor = tensor.to(other.dtype());
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
  tensor = tensor.to(other.device());
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kDouble, at::kStrided);

  other = at::empty({3, 4}, at::TensorOptions({at::kCUDA, 1}).dtype(at::kLong));
  tensor = tensor.to(other.device(), other.dtype());
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kLong, at::kStrided);

  other = at::empty({3, 4}, at::kFloat);
  tensor = tensor.to(other.options());
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
}

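// to() with options that already match the source should return a tensor
// backed by the same storage; identical data_ptr values show that no copy
// was made.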
TEST(TensorTest, ToDoesNotCopyWhenOptionsAreAllTheSame_CUDA) {
  auto tensor = at::empty(
      {3, 4}, at::TensorOptions(at::kFloat).device(at::Device("cuda")));
  auto hopefully_not_copy = tensor.to(tensor.options());
  ASSERT_EQ(hopefully_not_copy.data_ptr<float>(), tensor.data_ptr<float>());
  hopefully_not_copy = tensor.to(at::kFloat);
  ASSERT_EQ(hopefully_not_copy.data_ptr<float>(), tensor.data_ptr<float>());
  hopefully_not_copy = tensor.to("cuda");
  ASSERT_EQ(hopefully_not_copy.data_ptr<float>(), tensor.data_ptr<float>());
  hopefully_not_copy = tensor.to(at::TensorOptions("cuda"));
  ASSERT_EQ(hopefully_not_copy.data_ptr<float>(), tensor.data_ptr<float>());
  hopefully_not_copy = tensor.to(at::TensorOptions(at::kFloat));
  ASSERT_EQ(hopefully_not_copy.data_ptr<float>(), tensor.data_ptr<float>());
}

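// Covers the to() overloads that change device and dtype in a single call.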
TEST(TensorTest, ToDeviceAndDtype_MultiCUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1}, at::kInt);
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kInt, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 0}).dtype(at::kLong));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kLong, at::kStrided);

  tensor = tensor.to(at::TensorOptions({at::kCUDA, 1}).dtype(at::kDouble));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kDouble, at::kStrided);

  tensor = tensor.to(at::kCPU, at::kInt);
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
}

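// Smoke test: with MAGMA available, calling at::inverse on a CUDA tensor
// exercises MAGMA initialization; the test passes as long as this does not
// throw or crash.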
TEST(TensorTest, MagmaInitializesCorrectly_CUDA) {
  // Any tensor will work here as long as it's invertible
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
  float data[] = {1, 1, 1, 0, 0, 3, 1, 2, 2, 3, 1, 0, 1, 0, 2, 1};
  auto tensor =
      at::from_blob(data, {4, 4}, at::TensorOptions(at::kFloat)).cuda();
  if (at::hasMAGMA()) {
    at::inverse(tensor);
  }
}