1 /*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under the BSD-style license found in the
6 * LICENSE file in the root directory of this source tree.
7 */
8
#include <gtest/gtest.h>

#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
#include <executorch/runtime/kernel/kernel_runtime_context.h>
#include <executorch/runtime/kernel/operator_registry.h>
#include <executorch/runtime/platform/runtime.h>
#include <executorch/test/utils/DeathTest.h>
#include <array>
#include <cstdint>
#include <cstdio>
#include <vector>
22
23 using exec_aten::SizesType;
24 using torch::executor::Error;
25 using torch::executor::resize_tensor;
26
27 namespace torch {
28 namespace executor {
29
// Fixture for the prim-op registration tests. Re-initializes the ExecuTorch
// runtime and gives each test a fresh KernelRuntimeContext.
class RegisterPrimOpsTest : public ::testing::Test {
 protected:
  // Context passed as the first argument to every registered op function.
  KernelRuntimeContext context;
  void SetUp() override {
    torch::executor::runtime_init();
    // Reset to a default-constructed context so tests don't share state.
    context = KernelRuntimeContext();
  }
};
38
TEST_F(RegisterPrimOpsTest, OpRegistered) {
  // Both symbolic-shape prim ops must be present in the op registry.
  const char* required_ops[] = {"aten::sym_size.int", "aten::sym_numel"};
  for (const char* op_name : required_ops) {
    EXPECT_TRUE(hasOpsFn(op_name));
  }
}
43
TEST_F(RegisterPrimOpsTest, SymSizeReturnsCorrectValue) {
  testing::TensorFactory<ScalarType::Int> tf;

  // Stack layout for sym_size.int: (self, dim, out).
  Tensor input = tf.ones({3, 5});
  int64_t query_dim = 1;
  int64_t result = 0;

  EValue values[3];
  values[0] = EValue(input);
  values[1] = EValue(query_dim);
  values[2] = EValue(result);

  EValue* stack[3] = {&values[0], &values[1], &values[2]};

  getOpsFn("aten::sym_size.int")(context, stack);

  // Dimension 1 of a {3, 5} tensor has extent 5.
  EXPECT_EQ(stack[2]->toInt(), 5);
}
65
TEST_F(RegisterPrimOpsTest, SymNumelReturnsCorrectValue) {
  testing::TensorFactory<ScalarType::Int> tf;

  // Stack layout for sym_numel: (self, out).
  Tensor input = tf.ones({3, 5});
  int64_t result = 0;

  EValue values[2];
  values[0] = EValue(input);
  values[1] = EValue(result);

  EValue* stack[2] = {&values[0], &values[1]};

  getOpsFn("aten::sym_numel")(context, stack);

  // A {3, 5} tensor holds 15 elements.
  EXPECT_EQ(stack[1]->toInt(), 15);
}
85
TEST_F(RegisterPrimOpsTest, TestAlgebraOps) {
  // One shared stack for every op below: (a = 3, b = 4, out). Each binary op
  // reads stack[0] and stack[1] and overwrites stack[2] with its result.
  EValue values[3];
  int64_t a = 3;
  int64_t b = 4;
  int64_t out = 0;
  values[0] = EValue(a);
  values[1] = EValue(b);
  values[2] = EValue(out);

  EValue* stack[3];
  for (size_t i = 0; i < 3; i++) {
    stack[i] = &values[i];
  }

  // 3 + 4 == 7
  getOpsFn("executorch_prim::add.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toInt(), 7);

  // 3 - 4 == -1
  getOpsFn("executorch_prim::sub.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toInt(), -1);

  // 3 * 4 == 12
  getOpsFn("executorch_prim::mul.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toInt(), 12);

  // Integer floor division: 3 // 4 == 0.
  getOpsFn("executorch_prim::floordiv.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toInt(), 0);

  // True division promotes the result to double: 3 / 4 == 0.75.
  getOpsFn("executorch_prim::truediv.Scalar")(context, stack);
  EXPECT_FLOAT_EQ(stack[2]->toDouble(), 0.75);

  // 3 % 4 == 3 for both the int and Scalar overloads.
  getOpsFn("executorch_prim::mod.int")(context, stack);
  EXPECT_EQ(stack[2]->toInt(), 3);

  getOpsFn("executorch_prim::mod.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toInt(), 3);

  // sym_float is unary: it reads stack[0] (a == 3) and writes its result to
  // stack[1] — hence the check on values[1], not values[2].
  getOpsFn("executorch_prim::sym_float.Scalar")(context, stack);
  EXPECT_FLOAT_EQ(stack[1]->toDouble(), 3.0);
}
124
TEST_F(RegisterPrimOpsTest, TestETCopyIndex) {
  // et_copy_index copies `to_copy` into row `index` of `copy_to`, growing a
  // dynamically-bounded `copy_to` one row at a time.
  EXPECT_TRUE(hasOpsFn("executorch_prim::et_copy_index.tensor"));

  int64_t index = 0;
  testing::TensorFactory<ScalarType::Int> tf;

#ifdef USE_ATEN_LIB
  // ATen mode tensors don't need dynamism specification.
  Tensor copy_to = tf.make({2, 2}, {0, 0, 0, 0});
#else
  SizesType expected_output_size[2] = {0, 0};
  Tensor copy_to =
      tf.make({2, 2}, {0, 0, 0, 0}, {}, TensorShapeDynamism::DYNAMIC_BOUND);
  // Resize the tensor to 0 size for the tests so the op grows it from empty.
  Error err = resize_tensor(copy_to, {expected_output_size, 2});
  EXPECT_EQ(err, Error::Ok);
#endif

  Tensor to_copy = tf.make({2}, {3, 4});

  // Stack layout: (copy_to, to_copy, index).
  EValue values[3];
  EValue* stack[3];

  values[0] = EValue(copy_to);
  values[1] = EValue(to_copy);
  values[2] = EValue(index);

  stack[0] = &values[0];
  stack[1] = &values[1];
  stack[2] = &values[2];

  // Simple test to copy to index 0; copy_to grows to {1, 2}.
  getOpsFn("executorch_prim::et_copy_index.tensor")(context, stack);

  EXPECT_EQ(copy_to.sizes()[0], 1);
  EXPECT_EQ(copy_to.sizes()[1], 2);
  EXPECT_TENSOR_EQ(copy_to, tf.make({1, 2}, {3, 4}));

  values[1] = tf.make({2}, {5, 6});
  values[2] = EValue((int64_t)1);
  // Copy to the next index, 1; copy_to grows to {2, 2}.
  getOpsFn("executorch_prim::et_copy_index.tensor")(context, stack);

  EXPECT_EQ(copy_to.sizes()[0], 2);
  EXPECT_EQ(copy_to.sizes()[1], 2);
  EXPECT_TENSOR_EQ(copy_to, tf.make({2, 2}, {3, 4, 5, 6}));
}
173
TEST_F(RegisterPrimOpsTest, TestETCopyIndexMismatchShape) {
  testing::TensorFactory<ScalarType::Int> tf;

  // Destination rows are {3} wide but the source is {2}; the op requires
  // copy_to.sizes[1:] == to_copy.sizes[:], so this call must abort.
  auto dst = tf.make({2, 3}, {1, 2, 3, 4, 5, 6});
  auto src = tf.make({2}, {1, 2});
  int64_t row = 1;

  EValue values[3];
  values[0] = EValue(dst);
  values[1] = EValue(src);
  values[2] = EValue(row);

  EValue* stack[3] = {&values[0], &values[1], &values[2]};

  ET_EXPECT_DEATH(
      getOpsFn("executorch_prim::et_copy_index.tensor")(context, stack), "");
}
198
TEST_F(RegisterPrimOpsTest, TestETCopyIndexStaticShape) {
  testing::TensorFactory<ScalarType::Int> tf;

  // Static-shape destination: copying into an in-bounds row replaces it.
  const std::vector<int> initial = {1, 2, 3, 4};
  auto dst = tf.make({2, 2}, initial);
  auto src = tf.make({2}, {5, 6});
  int64_t row = 1;

  EValue values[3];
  values[0] = EValue(dst);
  values[1] = EValue(src);
  values[2] = EValue(row);

  EValue* stack[3];
  for (size_t i = 0; i < 3; i++) {
    stack[i] = &values[i];
  }

  // Replace row 1 of dst with src; the shape stays {2, 2}.
  getOpsFn("executorch_prim::et_copy_index.tensor")(context, stack);
  EXPECT_EQ(dst.sizes()[0], 2);
  EXPECT_EQ(dst.sizes()[1], 2);
  EXPECT_TENSOR_EQ(dst, tf.make({2, 2}, {1, 2, 5, 6}));

#ifndef USE_ATEN_LIB
  // Row index 2 is out of bounds for a static {2, 2} tensor; in lean mode
  // this triggers a runtime abort.
  row = 2;
  values[2] = EValue(row);
  ET_EXPECT_DEATH(
      getOpsFn("executorch_prim::et_copy_index.tensor")(context, stack), "");
#endif
}
234
TEST_F(RegisterPrimOpsTest, TestBooleanOps) {
  // Compare lhs = 3.0 against rhs = 4.0; each op writes into the out slot.
  double lhs = 3;
  double rhs = 4;
  bool result = false;

  EValue values[3];
  values[0] = EValue(lhs);
  values[1] = EValue(rhs);
  values[2] = EValue(result);

  EValue* stack[3] = {&values[0], &values[1], &values[2]};

  // 3 >= 4 is false.
  getOpsFn("executorch_prim::ge.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toBool(), false);

  // 3 > 4 is false.
  getOpsFn("executorch_prim::gt.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toBool(), false);

  // 3 <= 4 is true.
  getOpsFn("executorch_prim::le.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toBool(), true);

  // 3 < 4 is true.
  getOpsFn("executorch_prim::lt.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toBool(), true);

  // 3 == 4 is false.
  getOpsFn("executorch_prim::eq.Scalar")(context, stack);
  EXPECT_EQ(stack[2]->toBool(), false);
}
264
TEST_F(RegisterPrimOpsTest, LocalScalarDenseReturnsCorrectValue) {
  testing::TensorFactory<ScalarType::Int> tf;

  // Extract the single element of a one-element tensor of ones.
  Tensor input = tf.ones({1});
  int64_t result = 0;

  EValue values[2];
  values[0] = EValue(input);
  values[1] = EValue(result);

  EValue* stack[2] = {&values[0], &values[1]};

  getOpsFn("aten::_local_scalar_dense")(context, stack);

  EXPECT_EQ(stack[1]->toInt(), 1);
}
285
TEST_F(RegisterPrimOpsTest, NegScalarReturnsCorrectValue) {
  EValue values[2];
  EValue* stack[2] = {&values[0], &values[1]};

  // Float case: neg(5.0) == -5.0.
  values[0] = EValue(5.0f);
  values[1] = EValue(0.0f);

  getOpsFn("executorch_prim::neg.Scalar")(context, stack);

  EXPECT_EQ(stack[1]->toDouble(), -5.0f);

  // Integer case: neg(5) == -5.
  int64_t input = 5;
  int64_t output = 0;
  values[0] = EValue(input);
  values[1] = EValue(output);

  getOpsFn("executorch_prim::neg.Scalar")(context, stack);

  EXPECT_EQ(stack[1]->toInt(), -5l);
}
312
TEST_F(RegisterPrimOpsTest, TestNegScalarWithTensorDies) {
  testing::TensorFactory<ScalarType::Int> tf;

  // neg.Scalar only accepts scalar inputs; a tensor input must abort.
  auto bad_input = tf.make({2, 3}, {1, 2, 3, 4, 5, 6});
  int64_t unused_out = 0;

  EValue values[2];
  values[0] = EValue(bad_input);
  values[1] = EValue(unused_out);

  EValue* stack[2] = {&values[0], &values[1]};

  ET_EXPECT_DEATH(getOpsFn("executorch_prim::neg.Scalar")(context, stack), "");
}
332
TEST_F(RegisterPrimOpsTest, TestETView) {
  // et_view reshapes `self` into `out` without copying: on success `out`
  // aliases self's data pointer. Stack layout: (self, size_list, out).
  EXPECT_TRUE(hasOpsFn("executorch_prim::et_view.default"));

  testing::TensorFactory<ScalarType::Int> tf;

  // ***************************************************************************
  // Make self for tests
  // ***************************************************************************
  auto self = tf.make({3, 2}, {1, 2, 3, 4, 5, 6});
  auto self_evalue = EValue(self);

  // ***************************************************************************
  // Make size for tests
  // ***************************************************************************
  // -1 marks the single dimension whose extent is inferred from numel.
  int64_t size[3] = {1, 3, -1};
  EValue size_as_evals[3] = {EValue(size[0]), EValue(size[1]), EValue(size[2])};
  EValue* size_wrapped_vals[3] = {
      &size_as_evals[0], &size_as_evals[1], &size_as_evals[2]};
  int64_t size_unwrapped_vals[3] = {0, 0, 0};
  EValue size_int_list_evalue = EValue(
      BoxedEvalueList<int64_t>(size_wrapped_vals, size_unwrapped_vals, 3));

  int64_t bad_size1[3] = {-1, 3, -1}; // two inferred dimensions
  EValue bad_size_as_evals1[3] = {
      EValue(bad_size1[0]), EValue(bad_size1[1]), EValue(bad_size1[2])};
  EValue* bad_size_wrapped_vals1[3] = {
      &bad_size_as_evals1[0], &bad_size_as_evals1[1], &bad_size_as_evals1[2]};
  int64_t bad_size_unwrapped_vals1[3] = {0, 0, 0};
  EValue bad_size_int_list_evalue1 = EValue(BoxedEvalueList<int64_t>(
      bad_size_wrapped_vals1, bad_size_unwrapped_vals1, 3));

  int64_t bad_size2[3] = {-2, -3, 1}; // negative size not supported
  EValue bad_size_as_evals2[3] = {
      EValue(bad_size2[0]), EValue(bad_size2[1]), EValue(bad_size2[2])};
  EValue* bad_size_wrapped_vals2[3] = {
      &bad_size_as_evals2[0], &bad_size_as_evals2[1], &bad_size_as_evals2[2]};
  int64_t bad_size_unwrapped_vals2[3] = {0, 0, 0};
  EValue bad_size_int_list_evalue2 = EValue(BoxedEvalueList<int64_t>(
      bad_size_wrapped_vals2, bad_size_unwrapped_vals2, 3));

  // ***************************************************************************
  // Make outs for tests
  // ***************************************************************************
  constexpr int N_GOOD_OUTS = 2;
  Tensor good_outs[N_GOOD_OUTS] = {
      tf.ones({1, 3, 2}), // correct size with nullptr
      tf.ones({1, 3, 2}), // correct size with self data_ptr
  };
  // good_outs[0] starts with a null data pointer; good_outs[1] already
  // aliases self's buffer. Both are accepted by the op.
  internal::reset_data_ptr(good_outs[0]);
  ET_CHECK(
      internal::set_tensor_data(
          good_outs[1], self.mutable_data_ptr(), good_outs[1].nbytes()) ==
      Error::Ok);
  EValue good_out_evalues[N_GOOD_OUTS] = {
      EValue(good_outs[0]), EValue(good_outs[1])};

  // bad outs expect death
  constexpr int N_BAD_OUTS = 2;
  Tensor bad_outs[N_BAD_OUTS] = {
      tf.ones({1, 3, 2, 1}), // wrong rank
      tf.ones({1, 3, 3}) // wrong size
  };
  EValue bad_out_evalues[N_BAD_OUTS] = {
      EValue(bad_outs[0]), EValue(bad_outs[1])};

  // ***************************************************************************
  // Run tests
  // ***************************************************************************

  constexpr int N_BAD_STACKS = N_BAD_OUTS + 2;
  EValue* bad_stacks[N_BAD_STACKS][3] = {
      // Bad out stacks
      {&self_evalue, &size_int_list_evalue, &bad_out_evalues[0]},
      {&self_evalue, &size_int_list_evalue, &bad_out_evalues[1]},
      // Bad size stacks
      {&self_evalue, &bad_size_int_list_evalue1, &good_out_evalues[0]},
      {&self_evalue, &bad_size_int_list_evalue2, &good_out_evalues[0]}};

  // Bad stacks expect death
  for (int i = 0; i < N_BAD_STACKS; i++) {
    ET_EXPECT_DEATH(
        getOpsFn("executorch_prim::et_view.default")(context, bad_stacks[i]),
        "");
  }

  constexpr int N_GOOD_STACKS = N_GOOD_OUTS;
  EValue* good_out_stacks[N_GOOD_STACKS][3] = {
      {&self_evalue, &size_int_list_evalue, &good_out_evalues[0]},
      {&self_evalue, &size_int_list_evalue, &good_out_evalues[1]}};

  // Good outs expect no death and correct output; the view must alias
  // self's data rather than copy it.
  for (int i = 0; i < N_GOOD_STACKS; i++) {
    getOpsFn("executorch_prim::et_view.default")(context, good_out_stacks[i]);
    EXPECT_TENSOR_EQ(good_outs[i], tf.make({1, 3, 2}, {1, 2, 3, 4, 5, 6}));
    EXPECT_EQ(good_outs[i].const_data_ptr(), self.const_data_ptr());
  }
}
430
TEST_F(RegisterPrimOpsTest, TestETViewDynamic) {
  // et_view with a dynamically-bounded out: the op resizes out to the
  // (inferred) target shape and aliases self's data.
  testing::TensorFactory<ScalarType::Int> tf;

  auto self = tf.make({3, 1}, {1, 2, 3});
  auto self_evalue = EValue(self);

  int64_t size[3] = {1, 3, -1}; // inferred size should be {1, 3, 1}
  // Construct the size as an EValue int_list
  EValue size_as_evals[3] = {EValue(size[0]), EValue(size[1]), EValue(size[2])};
  EValue* size_wrapped_vals[3] = {
      &size_as_evals[0], &size_as_evals[1], &size_as_evals[2]};
  int64_t size_unwrapped_vals[3] = {0, 0, 0};
  EValue size_int_list_evalue = EValue(
      BoxedEvalueList<int64_t>(size_wrapped_vals, size_unwrapped_vals, 3));

#ifdef USE_ATEN_LIB
  // ATen mode tensors don't need dynamism specification.
  auto out = tf.make({3, 2, 1}, {0, 0, 0, 0, 0, 0});
#else
  auto out = tf.make(
      {3, 2, 1}, {0, 0, 0, 0, 0, 0}, {}, TensorShapeDynamism::DYNAMIC_BOUND);
#endif

  // Drop out's own buffer so the only way it can hold data afterwards is by
  // aliasing self.
  internal::reset_data_ptr(out);
  EValue out_evalue = EValue(out);

  EValue* stack[3] = {&self_evalue, &size_int_list_evalue, &out_evalue};

  getOpsFn("executorch_prim::et_view.default")(context, stack);

  EXPECT_TENSOR_EQ(out, tf.make({1, 3, 1}, {1, 2, 3}));
  EXPECT_EQ(out.const_data_ptr(), self.const_data_ptr());
}
464
TEST_F(RegisterPrimOpsTest, TestETViewEmpty) {
  // et_view on a zero-element tensor: viewing is legal (both data pointers
  // are null), but a size list with a 0 alongside -1 cannot be inferred.
  testing::TensorFactory<ScalarType::Int> tf;

  auto self = tf.make({3, 1, 0}, {});
  auto self_evalue = EValue(self);
  EXPECT_EQ(self.const_data_ptr(), nullptr); // empty tensor has null data

  // Construct the sizes
  int64_t size[3] = {3, 1, -1};
  EValue size_as_evals[3] = {EValue(size[0]), EValue(size[1]), EValue(size[2])};
  EValue* size_wrapped_vals[3] = {
      &size_as_evals[0], &size_as_evals[1], &size_as_evals[2]};
  int64_t size_unwrapped_vals[3] = {0, 0, 0};
  EValue size_int_list_evalue = EValue(
      BoxedEvalueList<int64_t>(size_wrapped_vals, size_unwrapped_vals, 3));

  int64_t bad_size[3] = {0, 1, -1}; // bad size: cannot infer with 0
  EValue bad_size_as_evals[3] = {
      EValue(bad_size[0]), EValue(bad_size[1]), EValue(bad_size[2])};
  EValue* bad_size_wrapped_vals[3] = {
      &bad_size_as_evals[0], &bad_size_as_evals[1], &bad_size_as_evals[2]};
  int64_t bad_size_unwrapped_vals[3] = {0, 0, 0};
  EValue bad_size_int_list_evalue = EValue(BoxedEvalueList<int64_t>(
      bad_size_wrapped_vals, bad_size_unwrapped_vals, 3));

  auto out = tf.make({3, 1, 0}, {}, {});
  EValue out_evalue = EValue(out);
  EXPECT_EQ(out.const_data_ptr(), nullptr);

  // good size test
  EValue* stack[3] = {&self_evalue, &size_int_list_evalue, &out_evalue};
  getOpsFn("executorch_prim::et_view.default")(context, stack);
  EXPECT_TENSOR_EQ(out, tf.make({3, 1, 0}, {}));
  EXPECT_EQ(out.const_data_ptr(), self.const_data_ptr());

  // bad size test
  EValue* bad_stack[3] = {&self_evalue, &bad_size_int_list_evalue, &out_evalue};
  ET_EXPECT_DEATH(
      getOpsFn("executorch_prim::et_view.default")(context, bad_stack), "");
}
505
TEST_F(RegisterPrimOpsTest, TestCeil) {
  // Each input paired with the expected integral ceiling.
  std::array<double, 10> inputs = {
      0.0, 0.25, 0.5, 0.75, 1.0, 1.75, -0.5, -1.0, -1.5, 9.999999};
  std::array<int64_t, 10> expected = {0, 1, 1, 1, 1, 2, 0, -1, -1, 10};

  // size_t index: `auto i = 0` deduced int and triggered a signed/unsigned
  // comparison against std::array::size().
  for (size_t i = 0; i < inputs.size(); i++) {
    // Stack layout: (input, out).
    EValue values[2];
    values[0] = EValue(inputs[i]);
    values[1] = EValue(0.0);

    EValue* stack[2];
    for (size_t j = 0; j < 2; j++) {
      stack[j] = &values[j];
    }

    getOpsFn("executorch_prim::ceil.Scalar")(context, stack);
    EXPECT_EQ(stack[1]->toInt(), expected[i]);
  }
}
525
TEST_F(RegisterPrimOpsTest, TestRound) {
  // Note that Python uses round-to-even for halfway values, so 0.5 -> 0 and
  // 1.5 -> 2 (and -1.5 -> -2).
  std::array<double, 10> inputs = {
      0.0, 0.25, 0.5, 0.75, 1.0, 1.5, -0.5, -1.0, -1.5, 9.999999};
  std::array<int64_t, 10> expected = {0, 0, 0, 1, 1, 2, 0, -1, -2, 10};

  // size_t index: `auto i = 0` deduced int and triggered a signed/unsigned
  // comparison against std::array::size().
  for (size_t i = 0; i < inputs.size(); i++) {
    // Stack layout: (input, out).
    EValue values[2];
    values[0] = EValue(inputs[i]);
    values[1] = EValue(0.0);

    EValue* stack[2];
    for (size_t j = 0; j < 2; j++) {
      stack[j] = &values[j];
    }

    getOpsFn("executorch_prim::round.Scalar")(context, stack);
    EXPECT_EQ(stack[1]->toInt(), expected[i]);
  }
}
546
TEST_F(RegisterPrimOpsTest, TestTrunc) {
  // Truncation drops the fractional part toward zero: 9.999999 -> 9,
  // -1.5 -> -1.
  std::array<double, 10> inputs = {
      0.0, 0.25, 0.5, 0.75, 1.0, 1.75, -0.5, -1.0, -1.5, 9.999999};
  std::array<int64_t, 10> expected = {0, 0, 0, 0, 1, 1, 0, -1, -1, 9};

  // size_t index: `auto i = 0` deduced int and triggered a signed/unsigned
  // comparison against std::array::size().
  for (size_t i = 0; i < inputs.size(); i++) {
    // Stack layout: (input, out).
    EValue values[2];
    values[0] = EValue(inputs[i]);
    values[1] = EValue(0.0);

    EValue* stack[2];
    for (size_t j = 0; j < 2; j++) {
      stack[j] = &values[j];
    }

    getOpsFn("executorch_prim::trunc.Scalar")(context, stack);
    EXPECT_EQ(stack[1]->toInt(), expected[i]);
  }
}
566
567 } // namespace executor
568 } // namespace torch
569