/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpFloorDivideTest : public OperatorTest {
 protected:
  // Invokes the floor_divide.out kernel under test through the runtime
  // context provided by OperatorTest.
  Tensor&
  op_floor_divide_out(const Tensor& self, const Tensor& other, Tensor& out) {
    return torch::executor::aten::floor_divide_outf(context_, self, other, out);
  }

  template <ScalarType DTYPE>
  void test_integer_floor_divide() {
    TensorFactory<DTYPE> tf;

    const std::vector<int32_t> sizes = {3, 2};

    // Destination for the floor_divide.
    Tensor out = tf.zeros(sizes);

    // floor_divide two tensors.
    // Integer division of -8 / 6 returns -1, but -8 // 6 is -2.
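    // Floor division rounds toward negative infinity, whereas C++ integer
    // division truncates toward zero (e.g. 3 / -5 == 0 but 3 // -5 == -1;
    // see the last element below).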
    op_floor_divide_out(
        tf.make(sizes, /*data=*/{-8, 1, 2, 4, 8, 3}),
        tf.make(sizes, /*data=*/{6, 2, 2, 2, 2, -5}),
        out);

    // Check that it matches the expected output.
    EXPECT_TENSOR_EQ(out, tf.make(sizes, /*data=*/{-2, 0, 1, 2, 4, -1}));
  }

  template <ScalarType DTYPE>
  void test_floating_point_floor_divide() {
    TensorFactory<DTYPE> tf;

    const std::vector<int32_t> sizes = {3, 2};

    // Destination for the floor_divide.
    Tensor out = tf.zeros(sizes);

    // floor_divide two tensors.
    // std::floor(-0.5 / -0.1) == 5.0, but -0.5 // -0.1 yields 4.0.
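    // This matches Python's `//` semantics, where -0.5 // -0.1 == 4.0 even
    // though -0.5 / -0.1 evaluates to exactly 5.0 in double precision: 0.1
    // has no exact binary representation, so the true quotient is slightly
    // below 5 and floors down to 4.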
    op_floor_divide_out(
        tf.make(sizes, /*data=*/{-5.3, 1.1, 2.2, 4.4, 6.8, -0.5}),
        tf.make(sizes, /*data=*/{2.7, 2.0, 2.0, 2.0, 2.0, -0.1}),
        out);

    // Check that it matches the expected output.
    EXPECT_TENSOR_CLOSE(
        out, tf.make(sizes, /*data=*/{-2.0, 0.0, 1.0, 2.0, 3.0, 4.0}));
  }
};

// Common testing for floor-dividing two integer Tensors.
TEST_F(OpFloorDivideTest, ByteTensors) {
  TensorFactory<ScalarType::Byte> tf;

  const std::vector<int32_t> sizes = {2, 2};

  // Destination for the floor_divide.
  Tensor out = tf.zeros(sizes);

  // floor_divide two tensors.
  op_floor_divide_out(
      tf.make(sizes, /*data=*/{1, 2, 4, 8}),
      tf.make(sizes, /*data=*/{2, 2, 2, 2}),
      out);

  // Check that it matches the expected output.
  EXPECT_TENSOR_EQ(out, tf.make(sizes, /*data=*/{0, 1, 2, 4}));
}

TEST_F(OpFloorDivideTest, CharTensors) {
  test_integer_floor_divide<ScalarType::Char>();
}

TEST_F(OpFloorDivideTest, ShortTensors) {
  test_integer_floor_divide<ScalarType::Short>();
}

TEST_F(OpFloorDivideTest, IntTensors) {
  test_integer_floor_divide<ScalarType::Int>();
}

TEST_F(OpFloorDivideTest, LongTensors) {
  test_integer_floor_divide<ScalarType::Long>();
}

// Common testing for floor-dividing two floating point Tensors.
TEST_F(OpFloorDivideTest, FloatTensors) {
  test_floating_point_floor_divide<ScalarType::Float>();
}

TEST_F(OpFloorDivideTest, DoubleTensors) {
  test_floating_point_floor_divide<ScalarType::Double>();
}

TEST_F(OpFloorDivideTest, UnhandledDtypeDies) {
  // floor_divide() doesn't handle Bool.
  TensorFactory<ScalarType::Bool> tf;

  const std::vector<int32_t> sizes = {2, 2};

  // Inputs to divide.
  Tensor a = tf.make(sizes, /*data=*/{false, true, false, true});
  Tensor b = tf.make(sizes, /*data=*/{true, true, true, true});

  // Destination for the floor_divide.
  Tensor out = tf.zeros(sizes);

  // Dividing the two boolean tensors should cause an assertion and kill the
  // test process.
  ET_EXPECT_KERNEL_FAILURE(context_, op_floor_divide_out(a, b, out));
}

// Mismatched shape tests.

TEST_F(OpFloorDivideTest, MismatchedInputShapesDies) {
  TensorFactory<ScalarType::Int> tf;

  // Inputs with different shapes.
  Tensor a = tf.ones(/*sizes=*/{4});
  Tensor b = tf.ones(/*sizes=*/{2, 2});

  // Destination for the floor_divide; matches the shape of one of the inputs.
  Tensor out = tf.zeros(/*sizes=*/{4});

  // Dividing the two mismatched tensors should cause an assertion and kill
  // the test process.
  ET_EXPECT_KERNEL_FAILURE(context_, op_floor_divide_out(a, b, out));
}

TEST_F(OpFloorDivideTest, MismatchedOutputShapesDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched output shape";
  }
  TensorFactory<ScalarType::Int> tf;

  const std::vector<int32_t> sizes = {2, 2};

  // Inputs with the same shapes.
  Tensor a = tf.ones(sizes);
  Tensor b = tf.ones(sizes);

  // Destination with a different shape.
  Tensor out = tf.zeros(/*sizes=*/{4});

  // Dividing the tensors into a mismatched output should cause an assertion
  // and kill the test process.
  ET_EXPECT_KERNEL_FAILURE(context_, op_floor_divide_out(a, b, out));
}

TEST_F(OpFloorDivideTest, BroadcastDimSizeIsOneAB) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.6651028990745544,
       0.47241002321243286,
       0.15020078420639038,
       0.5280023813247681,
       0.9517974257469177,
       0.5294632911682129});
  Tensor y = tf.make({1, 2}, {0.522396445274353, 0.6753279566764832});
  Tensor expected_result = tf.make({3, 2}, {1.0, 0.0, 0.0, 0.0, 1.0, 0.0});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpFloorDivideTest, BroadcastDimSizeMissingAB) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.6651028990745544,
       0.47241002321243286,
       0.15020078420639038,
       0.5280023813247681,
       0.9517974257469177,
       0.5294632911682129});
  Tensor y = tf.make({2}, {0.522396445274353, 0.6753279566764832});
  Tensor expected_result = tf.make({3, 2}, {1.0, 0.0, 0.0, 0.0, 1.0, 0.0});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpFloorDivideTest, BroadcastDimSizeIsOneBA) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({1, 2}, {0.522396445274353, 0.6753279566764832});
  Tensor y = tf.make(
      {3, 2},
      {0.6651028990745544,
       0.47241002321243286,
       0.15020078420639038,
       0.5280023813247681,
       0.9517974257469177,
       0.5294632911682129});
  Tensor expected_result = tf.make({3, 2}, {0.0, 1.0, 3.0, 1.0, 0.0, 1.0});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpFloorDivideTest, BroadcastDimSizeMissingBA) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({1, 2}, {0.522396445274353, 0.6753279566764832});
  Tensor y = tf.make(
      {3, 2},
      {0.6651028990745544,
       0.47241002321243286,
       0.15020078420639038,
       0.5280023813247681,
       0.9517974257469177,
       0.5294632911682129});
  Tensor expected_result = tf.make({3, 2}, {0.0, 1.0, 3.0, 1.0, 0.0, 1.0});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpFloorDivideTest, DynamicShapeUpperBoundSameAsExpected) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.34620773792266846,
       0.7118645310401917,
       0.028005361557006836,
       0.8868894577026367,
       0.38272881507873535,
       0.19501900672912598});
  Tensor y = tf.make(
      {3, 2},
      {0.3282443881034851,
       0.7458182573318481,
       0.1568273901939392,
       0.6325231194496155,
       0.2777167558670044,
       0.09974533319473267});
  Tensor expected_result = tf.make({3, 2}, {1.0, 0.0, 0.0, 1.0, 1.0, 1.0});

  Tensor out =
      tf.zeros({3, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpFloorDivideTest, DynamicShapeUpperBoundLargerThanExpected) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.34620773792266846,
       0.7118645310401917,
       0.028005361557006836,
       0.8868894577026367,
       0.38272881507873535,
       0.19501900672912598});
  Tensor y = tf.make(
      {3, 2},
      {0.3282443881034851,
       0.7458182573318481,
       0.1568273901939392,
       0.6325231194496155,
       0.2777167558670044,
       0.09974533319473267});
  Tensor expected_result = tf.make({3, 2}, {1.0, 0.0, 0.0, 1.0, 1.0, 1.0});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpFloorDivideTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.34620773792266846,
       0.7118645310401917,
       0.028005361557006836,
       0.8868894577026367,
       0.38272881507873535,
       0.19501900672912598});
  Tensor y = tf.make(
      {3, 2},
      {0.3282443881034851,
       0.7458182573318481,
       0.1568273901939392,
       0.6325231194496155,
       0.2777167558670044,
       0.09974533319473267});
  Tensor expected_result = tf.make({3, 2}, {1.0, 0.0, 0.0, 1.0, 1.0, 1.0});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_floor_divide_out(x, y, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}