xref: /aosp_15_r20/external/executorch/kernels/test/op_max_test.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD-style license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
9 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10 #include <executorch/kernels/test/TestUtil.h>
11 #include <executorch/kernels/test/supported_features.h>
12 #include <executorch/runtime/core/exec_aten/exec_aten.h>
13 #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14 #include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
16 #include <executorch/test/utils/DeathTest.h>
17 #include <gtest/gtest.h>
18 #include <cmath>
19 
20 using namespace ::testing;
21 using exec_aten::ArrayRef;
22 using exec_aten::ScalarType;
23 using exec_aten::Tensor;
24 using torch::executor::testing::TensorFactory;
25 
26 class OpMaxOutTest : public OperatorTest {
27  protected:
op_max_dim_max(const Tensor & self,int64_t dim,bool keepdim,Tensor & max,Tensor & max_indices)28   std::tuple<Tensor&, Tensor&> op_max_dim_max(
29       const Tensor& self,
30       int64_t dim,
31       bool keepdim,
32       Tensor& max,
33       Tensor& max_indices) {
34     return torch::executor::aten::max_outf(
35         context_, self, dim, keepdim, max, max_indices);
36   }
37 
38   template <ScalarType IN_DTYPE>
test_max_out_invalid_dimensions()39   void test_max_out_invalid_dimensions() {
40     TensorFactory<IN_DTYPE> tf_in;
41     TensorFactory<ScalarType::Long> tf_long;
42 
43     Tensor self = tf_in.ones(/*sizes=*/{2, 3, 4});
44     Tensor max = tf_in.zeros({2, 3, 2});
45     Tensor max_indices = tf_in.zeros({2, 3});
46 
47     // output tensor dim mismatch
48     ET_EXPECT_KERNEL_FAILURE(
49         context_,
50         op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices));
51 
52     // output tensor shape incorrect: size of dimension: dim should be 1
53     max = tf_in.zeros({2, 3, 2});
54     max_indices = tf_in.zeros({2, 3, 2});
55     ET_EXPECT_KERNEL_FAILURE(
56         context_,
57         op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices));
58 
59     // output tensor shape should be squeezed when keepdim is false
60     max = tf_in.zeros({2, 3, 1});
61     max_indices = tf_in.zeros({2, 3, 1});
62     ET_EXPECT_KERNEL_FAILURE(
63         context_,
64         op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/false, max, max_indices));
65 
66     // invalid dim
67     max = tf_in.zeros({2, 3, 1});
68     max_indices = tf_in.zeros({2, 3, 1});
69     ET_EXPECT_KERNEL_FAILURE(
70         context_,
71         op_max_dim_max(self, /*dim=*/3, /*keepdim=*/true, max, max_indices));
72   }
73 
test_dynamic_shape(const std::vector<int32_t> & out_shape,enum torch::executor::TensorShapeDynamism dynamism)74   void test_dynamic_shape(
75       const std::vector<int32_t>& out_shape,
76       enum torch::executor::TensorShapeDynamism dynamism) {
77     /* %python
78     %rewrite(max_template) */
79 
80     TensorFactory<ScalarType::Float> tf;
81     TensorFactory<ScalarType::Long> tfl;
82 
83     Tensor input = tf.make(
84         {2, 3, 4},
85         {0.49625658988952637,  0.7682217955589294,  0.08847743272781372,
86          0.13203048706054688,  0.30742281675338745, 0.6340786814689636,
87          0.4900934100151062,   0.8964447379112244,  0.455627977848053,
88          0.6323062777519226,   0.3488934636116028,  0.40171730518341064,
89          0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
90          0.518521785736084,    0.6976675987243652,  0.800011396408081,
91          0.16102945804595947,  0.28226858377456665, 0.6816085577011108,
92          0.9151939749717712,   0.39709991216659546, 0.8741558790206909});
93     Tensor expected_max = tf.make(
94         {2, 4},
95         {0.49625658988952637,
96          0.7682217955589294,
97          0.4900934100151062,
98          0.8964447379112244,
99          0.6976675987243652,
100          0.9151939749717712,
101          0.39709991216659546,
102          0.8741558790206909});
103     Tensor expected_max_indices = tfl.make({2, 4}, {0, 0, 1, 1, 1, 2, 2, 2});
104     Tensor max = tf.zeros(out_shape, dynamism);
105     Tensor max_indices = tfl.zeros(out_shape, dynamism);
106 
107     op_max_dim_max(input, 1, false, max, max_indices);
108     EXPECT_TENSOR_EQ(max, expected_max);
109     EXPECT_TENSOR_EQ(max_indices, expected_max_indices);
110   }
111 
112   template <ScalarType IN_DTYPE>
test_max_out_dtype()113   void test_max_out_dtype() {
114     TensorFactory<IN_DTYPE> tf_in;
115     TensorFactory<ScalarType::Long> tf_long;
116     // clang-format off
117     Tensor self = tf_in.make(
118       {2, 3, 4},
119       {
120         0, 1, 2, 4,
121         4, 2, 1, 0,
122         1, 0, 4, 2,
123 
124         4, 2, 1, 0,
125         0, 1, 2, 4,
126         1, 0, 4, 2,
127       });
128     // clang-format on
129 
130     Tensor max = tf_in.zeros({2, 4});
131     Tensor max_indices = tf_long.zeros({2, 4});
132     op_max_dim_max(self, /*dim=*/1, /*keepdim=*/false, max, max_indices);
133     // clang-format off
134     EXPECT_TENSOR_CLOSE(max, tf_in.make(
135       {2, 4},
136       {
137         4, 2, 4, 4,
138         4, 2, 4, 4
139       }));
140 
141     EXPECT_TENSOR_EQ(max_indices, tf_long.make(
142       {2, 4},
143       {
144         1, 1, 2, 0,
145         0, 0, 2, 1
146       }));
147     // clang-format on
148 
149     // negative dim should work
150     op_max_dim_max(self, /*dim=*/-2, /*keepdim=*/false, max, max_indices);
151     // clang-format off
152     EXPECT_TENSOR_CLOSE(max, tf_in.make(
153       {2, 4},
154       {
155         4, 2, 4, 4,
156         4, 2, 4, 4
157       }));
158     EXPECT_TENSOR_EQ(max_indices, tf_long.make(
159       {2, 4},
160       {
161         1, 1, 2, 0,
162         0, 0, 2, 1
163       }));
164     // clang-format on
165 
166     // keepdim should work
167     max = tf_in.zeros({2, 3, 1});
168     max_indices = tf_long.zeros({2, 3, 1});
169     op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices);
170     EXPECT_TENSOR_CLOSE(max, tf_in.make({2, 3, 1}, {4, 4, 4, 4, 4, 4}));
171     EXPECT_TENSOR_EQ(max_indices, tf_long.make({2, 3, 1}, {3, 0, 2, 0, 3, 2}));
172   }
173 };
174 
175 template <>
test_max_out_dtype()176 void OpMaxOutTest::test_max_out_dtype<ScalarType::Bool>() {
177   TensorFactory<ScalarType::Bool> tf_bool;
178   TensorFactory<ScalarType::Long> tf_long;
179   // clang-format off
180   Tensor self = tf_bool.make(
181     {2, 3, 4},
182     {
183       true,  false, true,  false,
184       false, false, false, false,
185       false, true,  true,  false,
186 
187       false, false, true,  false,
188       false, false, false, true,
189       true,  true,  true,  true,
190     });
191   // clang-format on
192 
193   Tensor max = tf_bool.zeros({2, 3, 1});
194   Tensor max_indices = tf_long.zeros({2, 3, 1});
195 
196   // +/-inf and nan should work
197   op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices);
198   // clang-format off
199   EXPECT_TENSOR_CLOSE(
200       max, tf_bool.make(
201         {2, 3, 1},
202         {
203           true,
204           false,
205           true,
206 
207           true,
208           true,
209           true
210         }));
211   EXPECT_TENSOR_EQ(max_indices, tf_long.make(
212     {2, 3, 1},
213     {
214       0,
215       0,
216       1,
217 
218       2,
219       3,
220       0
221     }));
222   // clang-format on
223 }
224 
225 class OpMaxUnaryOutTest : public OperatorTest {
226  protected:
op_max_unary_out(const Tensor & self,Tensor & out)227   Tensor& op_max_unary_out(const Tensor& self, Tensor& out) {
228     return torch::executor::aten::max_outf(context_, self, out);
229   }
230 
231   template <ScalarType IN_DTYPE>
test_max_unary_out_dtype()232   void test_max_unary_out_dtype() {
233     TensorFactory<IN_DTYPE> tf_in;
234     TensorFactory<ScalarType::Float> tf_out;
235     Tensor input = tf_in.make({2, 3}, {0, 1, 2, 4, 4, 2});
236     Tensor out = tf_out.zeros({});
237     Tensor expected = tf_out.make({}, {4});
238     op_max_unary_out(input, out);
239     EXPECT_TENSOR_CLOSE(out, expected);
240   }
241 
242   template <typename CTYPE, ScalarType IN_DTYPE>
test_max_unary_out_empty_integer()243   void test_max_unary_out_empty_integer() {
244     TensorFactory<IN_DTYPE> tf_in;
245     Tensor input = tf_in.make({2, 0}, {});
246     Tensor out = tf_in.zeros({});
247     Tensor expected = tf_in.make({}, {std::numeric_limits<CTYPE>::lowest()});
248     op_max_unary_out(input, out);
249     EXPECT_TENSOR_CLOSE(out, expected);
250   }
251 
252   template <typename CTYPE, ScalarType IN_DTYPE>
test_max_unary_out_empty_floating()253   void test_max_unary_out_empty_floating() {
254     TensorFactory<IN_DTYPE> tf_in;
255     Tensor input = tf_in.make({2, 0}, {});
256     Tensor out = tf_in.zeros({});
257     Tensor expected = tf_in.make({}, {-INFINITY});
258     op_max_unary_out(input, out);
259     EXPECT_TENSOR_CLOSE(out, expected);
260   }
261 };
262 
// Full reduction accepts every real dtype (incl. Half/BFloat16) as input
// with a Float output.
TEST_F(OpMaxUnaryOutTest, AllRealHBF16InputFloatOutputPasses) {
#define TEST_ENTRY(ctype, dtype) test_max_unary_out_dtype<ScalarType::dtype>();
  ET_FORALL_REALHBF16_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}
268 
// Empty integer inputs reduce to numeric_limits<ctype>::lowest() for every
// integral dtype.
TEST_F(OpMaxUnaryOutTest, EmptyIntegerInput) {
#define TEST_ENTRY(ctype, dtype) \
  test_max_unary_out_empty_integer<ctype, ScalarType::dtype>();
  ET_FORALL_INT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}
275 
// Empty floating-point inputs (incl. Half/BFloat16) reduce to -infinity.
TEST_F(OpMaxUnaryOutTest, EmptyFloatingInput) {
#define TEST_ENTRY(ctype, dtype) \
  test_max_unary_out_empty_floating<ctype, ScalarType::dtype>();
  ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}
282 
// The portable kernel must reject badly shaped outputs and out-of-range dims
// for every real dtype plus Bool. ATen resizes outputs instead of failing,
// so this test is skipped there.
TEST_F(OpMaxOutTest, MismatchedDimensionsDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
#define TEST_ENTRY(ctype, dtype) \
  test_max_out_invalid_dimensions<ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}
292 
// The kernel must enforce the output dtype contract: `max` matches the input
// dtype and `max_indices` is Long. Skipped under ATen, which is laxer here.
TEST_F(OpMaxOutTest, MismatchedDTypesDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Long> tf_long;

  Tensor self = tf_float.ones(/*sizes=*/{2, 3, 4});

  // Case 1: `max` is Long but the input is Float — values dtype must match
  // the input dtype.
  Tensor max = tf_long.zeros({2, 3, 1});
  Tensor max_indices = tf_long.zeros({2, 3, 1});
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices));

  // Case 2: `max_indices` is Float — the indices tensor must be Long.
  max = tf_float.zeros({2, 3, 1});
  max_indices = tf_float.zeros({2, 3, 1});
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices));
}
316 
// Dim-reduction happy path for every real dtype plus Bool (Bool uses the
// dedicated specialization above).
TEST_F(OpMaxOutTest, AllRealInputLongOutputPasses) {
#define TEST_ENTRY(ctype, dtype) test_max_out_dtype<ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}
322 
// Non-finite values must follow torch.max semantics: NaN dominates any
// comparison, and +inf beats every finite value.
TEST_F(OpMaxOutTest, InfinityAndNANTest) {
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Long> tf_long;
  // clang-format off
  Tensor self = tf_float.make(
    {2, 3, 4},
    {
      0,        1,         2,        INFINITY,
      INFINITY, -INFINITY, 1,        0,
      NAN,      INFINITY, -INFINITY, 2,

      NAN, NAN,      1,    0,
      0,   INFINITY, NAN,  4,
      1,   NAN,      3.14, 2,
    });
  // clang-format on

  Tensor max = tf_float.zeros({2, 3, 1});
  Tensor max_indices = tf_long.zeros({2, 3, 1});

  // +/-inf and nan should work
  op_max_dim_max(self, /*dim=*/-1, /*keepdim=*/true, max, max_indices);
  EXPECT_TENSOR_CLOSE(
      max, tf_float.make({2, 3, 1}, {INFINITY, INFINITY, NAN, NAN, NAN, NAN}));
  // clang-format off
  EXPECT_TENSOR_EQ(max_indices, tf_long.make(
    {2, 3, 1},
    {
      3,
      0,
      0,

      0,
      2,
      1
    }));
  // clang-format on
}
361 
362 /* %python
363 import torch
364 torch.manual_seed(0)
365 input = torch.rand(2, 3, 4)
366 dim = 1
367 keepdim = False
368 (values, indices) = torch.max(input, dim, keepdim=keepdim)
369 
370 max_template = f"""
371   {declare_tensor_factory("ScalarType::Float", "tf")}
372   {declare_tensor_factory("ScalarType::Long", "tfl")}
373 
374   {declare_tensor_make_t("input", "tf")}
375   {declare_tensor_make_t("values", "tf", "expected_max")}
376   {declare_tensor_make_t("indices", "tfl", "expected_max_indices")}
377   {declare_tensor_zeros("out_shape, dynamism", "tf", "max")}
378   {declare_tensor_zeros("out_shape, dynamism", "tfl", "max_indices")}
379 
380   op_max_dim_max(input, $dim$, $keepdim$, max, max_indices);
381   EXPECT_TENSOR_EQ(max, expected_max);
382   EXPECT_TENSOR_EQ(max_indices, expected_max_indices);""" */
383 
// Bounded-dynamic outputs whose declared shape already equals the result
// shape need no resize.
TEST_F(OpMaxOutTest, DynamicShapeUpperBoundSameAsExpected) {
  test_dynamic_shape(
      {2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}
388 
// Bounded-dynamic outputs declared larger than the result must be shrunk to
// {2, 4} by the kernel.
TEST_F(OpMaxOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  test_dynamic_shape(
      {10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}
393 
// Unbounded-dynamic outputs start smaller than the result and must be grown;
// only runs on kernels that support output resizing.
TEST_F(OpMaxOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  test_dynamic_shape(
      {1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
}
401