/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::testing::TensorFactory;
using torch::executor::testing::SupportedFeatures;
namespace etrt = executorch::runtime;

class OpSubOutTest : public OperatorTest {
 protected:
  Tensor& op_sub_out(
      const Tensor& self,
      const Tensor& other,
      const Scalar& alpha,
      Tensor& out) {
    return torch::executor::aten::sub_outf(context_, self, other, alpha, out);
  }

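  // Subtracts a tensor of ones from {1, 2, 4, 8} with alpha = 1 and verifies
  // the elementwise difference for one (input, input, output) dtype
  // combination.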
  template <ScalarType DTYPE_A, ScalarType DTYPE_B, ScalarType DTYPE_OUT>
  void test_sub() {
    TensorFactory<DTYPE_A> tf_a;
    TensorFactory<DTYPE_B> tf_b;
    TensorFactory<DTYPE_OUT> tf_out;

    const std::vector<int32_t> sizes = {2, 2};

    // Destination for the difference.
    Tensor out = tf_out.zeros(sizes);

    // Subtract two tensors.
    op_sub_out(
        tf_a.make(sizes, /*data=*/{1, 2, 4, 8}),
        tf_b.ones(sizes),
        /*alpha=*/1,
        out);

    // Check that it matches the expected output.
    EXPECT_TENSOR_EQ(out, tf_out.make(sizes, /*data=*/{0, 1, 3, 7}));
  }

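  // For a fixed pair of input dtypes, runs test_sub() with each output dtype
  // the kernel should accept.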
  template <ScalarType DTYPE_A, ScalarType DTYPE_B>
  void test_sub_enumerate_out_types() {
    test_sub<DTYPE_A, DTYPE_B, ScalarType::Half>();
    test_sub<DTYPE_A, DTYPE_B, ScalarType::Float>();
    test_sub<DTYPE_A, DTYPE_B, ScalarType::Double>();
    // Integral out type is only allowed if both inputs are integral types
    if (etrt::isIntegralType(DTYPE_A, false) &&
        etrt::isIntegralType(DTYPE_B, false)) {
      test_sub<DTYPE_A, DTYPE_B, ScalarType::Int>();
      test_sub<DTYPE_A, DTYPE_B, ScalarType::Long>();
    }
  }

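  // Fixes DTYPE_A and sweeps the second input dtype over all real types plus
  // Half.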
  template <ScalarType DTYPE_A>
  void test_sub_enumerate_b_types() {
#define ENUMERATE_TEST_ENTRY(ctype, dtype) \
  test_sub_enumerate_out_types<DTYPE_A, ScalarType::dtype>();

    ET_FORALL_REAL_TYPES_AND(Half, ENUMERATE_TEST_ENTRY)

#undef ENUMERATE_TEST_ENTRY
  }

  // Common testing for subtraction between two floating-point Tensors.
  template <ScalarType DTYPE>
  void test_floating_point_sub_out() {
    TensorFactory<DTYPE> tf;

    const std::vector<int32_t> sizes = {2, 2};

    // Destination for the subtraction.
    Tensor out = tf.zeros(sizes);

    // Performs subtraction on two tensors.
    op_sub_out(
        tf.make(sizes, /*data=*/{1.1, 2.2, 4.4, 8.8}),
        tf.ones(sizes),
        /*alpha=*/1,
        out);

    // Check that it matches the expected output.
    EXPECT_TENSOR_CLOSE(out, tf.make(sizes, /*data=*/{0.1, 1.2, 3.4, 7.8}));
  }

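  // Entry point for the dtype sweep: iterates the first input dtype over all
  // real types plus Half.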
  void test_sub_enumerate_a_types() {
#define ENUMERATE_TEST_ENTRY(ctype, dtype) \
  test_sub_enumerate_b_types<ScalarType::dtype>();

    ET_FORALL_REAL_TYPES_AND(Half, ENUMERATE_TEST_ENTRY)

#undef ENUMERATE_TEST_ENTRY
  }

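  // Broadcasts a single-element rank-1 tensor against a {2, 1, 3} tensor, in
  // both argument orders.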
  template <ScalarType DTYPE>
  void test_broadcast_rank1_scalar() {
    TensorFactory<DTYPE> tf;

    Tensor a = tf.make({2, 1, 3}, {2, 3, 4, 5, 6, 7});
    Tensor b = tf.make({1}, {2});

    // Destination for the broadcasting subtraction. Follow the broadcasting
    // rules in https://fburl.com/n9wl4d0o
    Tensor out = tf.zeros({2, 1, 3});

    op_sub_out(a, b, 1, out);

    Tensor ret = tf.make({2, 1, 3}, {0, 1, 2, 3, 4, 5});
    EXPECT_TENSOR_EQ(out, ret);

    op_sub_out(b, a, 1, out);
    ret = tf.make({2, 1, 3}, {0, -1, -2, -3, -4, -5});
    EXPECT_TENSOR_EQ(out, ret);
  }
};

class OpSubScalarOutTest : public OperatorTest {
 protected:
  Tensor& op_sub_scalar_out(
      const Tensor& self,
      const Scalar& other,
      const Scalar& alpha,
      Tensor& out) {
    return torch::executor::aten::sub_outf(context_, self, other, alpha, out);
  }
};

/**
 * Uses the function templates above to test all valid combinations of input
 * and output dtypes.
 */
TEST_F(OpSubOutTest, AllRealDtypesSupported) {
  test_sub_enumerate_a_types();
}

TEST_F(OpSubOutTest, FloatTensors) {
  test_floating_point_sub_out<ScalarType::Float>();
}

TEST_F(OpSubOutTest, DoubleTensors) {
  test_floating_point_sub_out<ScalarType::Double>();
}

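// a has shape {2, 1, 2, 1} and b has shape {2, 1, 4}; they broadcast to a
// common output shape of {2, 2, 2, 4}.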
TEST_F(OpSubOutTest, BroadcastSupported) {
  TensorFactory<ScalarType::Float> tf;

  Tensor a = tf.make({2, 1, 2, 1}, {7, 8, 9, 10});
  Tensor b = tf.make({2, 1, 4}, {1, 1, 1, 1, 2, 2, 2, 2});
  Tensor ref =
      tf.make({2, 2, 2, 4}, {6, 6, 6, 6, 7, 7, 7, 7, 5, 5, 5, 5, 6, 6, 6, 6,
                             8, 8, 8, 8, 9, 9, 9, 9, 7, 7, 7, 7, 8, 8, 8, 8});

  // Destination for the broadcasting subtraction. Follow the broadcasting
  // rules in https://fburl.com/n9wl4d0o
  Tensor out = tf.zeros({2, 2, 2, 4});

  op_sub_out(a, b, 1, out);

  EXPECT_TENSOR_EQ(out, ref);
}

TEST_F(OpSubOutTest, BroadcastSupported2) {
  TensorFactory<ScalarType::Float> tf;

  Tensor a = tf.make({3, 2, 1}, {2, 3, 4, 5, 6, 7});
  Tensor b = tf.make({1, 2, 1}, {2, 3});

  // Destination for the broadcasting subtraction. Follow the broadcasting
  // rules in https://fburl.com/n9wl4d0o
  Tensor out = tf.zeros({3, 2, 1});

  op_sub_out(a, b, 1, out);

  Tensor ret = tf.make({3, 2, 1}, {0, 0, 2, 2, 4, 4});
  EXPECT_TENSOR_EQ(out, ret);
}

TEST_F(OpSubOutTest, BroadcastScalarSupported1) {
  test_broadcast_rank1_scalar<ScalarType::Float>();
  test_broadcast_rank1_scalar<ScalarType::Half>();
}

TEST_F(OpSubOutTest, BroadcastScalarSupported2) {
  TensorFactory<ScalarType::Float> tf;

  Tensor a = tf.make({1, 1, 1}, {8});
  Tensor b = tf.make({3, 1, 1}, {2, 4, 8});

  // Destination for the broadcasting subtraction. Follow the broadcasting
  // rules in https://fburl.com/n9wl4d0o
  Tensor out = tf.zeros({3, 1, 1});

  op_sub_out(a, b, 1, out);

  Tensor ret = tf.make({3, 1, 1}, {6, 4, 0});
  EXPECT_TENSOR_EQ(out, ret);

  std::swap(a, b);
  out = tf.zeros({3, 1, 1});
  op_sub_out(a, b, 1, out);
  ret = tf.make({3, 1, 1}, {-6, -4, 0});
  EXPECT_TENSOR_EQ(out, ret);
}

TEST_F(OpSubOutTest, BroadcastScalarRank0Supported) {
  TensorFactory<ScalarType::Float> tf;

  Tensor a = tf.make({1}, {5});
  Tensor b = tf.make({}, {2});

  Tensor out = tf.zeros({1});

  op_sub_out(a, b, 1, out);

  Tensor ret = tf.make({1}, {3});
  EXPECT_TENSOR_EQ(out, ret);

  op_sub_out(b, a, 1, out);

  ret = tf.make({1}, {-3});
  EXPECT_TENSOR_EQ(out, ret);
}

//
// Death Tests
//

TEST_F(OpSubOutTest, IntTensorFloatAlphaDies) {
  // op_sub_out() doesn't handle a floating-point alpha for integral inputs.
  TensorFactory<ScalarType::Int> tf;

  const std::vector<int32_t> sizes = {2, 2};

  // Destination for the op.
  Tensor out = tf.zeros(sizes);

  // Subtracting two integral tensors with a floating-point alpha should cause
  // an assertion and kill the test process.
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_sub_out(tf.ones(sizes), tf.ones(sizes), /*alpha=*/.7, out));
}

TEST_F(OpSubOutTest, BoolInputTensorsFail) {
  TensorFactory<ScalarType::Bool> tf;

  const std::vector<int32_t> sizes = {2, 2};

  Tensor a = tf.make(sizes, /*data=*/{false, true, false, true});
  Tensor b = tf.make(sizes, /*data=*/{false, true, true, true});

  Tensor out = tf.zeros(sizes);

  ET_EXPECT_KERNEL_FAILURE(context_, op_sub_out(a, b, /*alpha=*/1, out));
}

TEST_F(OpSubOutTest, IntOutputWithFloatInputDies) {
  TensorFactory<ScalarType::Int> tfi;
  TensorFactory<ScalarType::Float> tff;

  const std::vector<int32_t> sizes = {2, 2};

  // Operands.
  Tensor a = tfi.make(sizes, /*data=*/{2, 4, 3, 3});
  Tensor b = tff.make(sizes, /*data=*/{2, 4, 3, 3});

  // Destination for the difference.
  Tensor out = tfi.zeros(sizes);

  ET_EXPECT_KERNEL_FAILURE(context_, op_sub_out(a, b, /*alpha=*/1, out));
}

TEST_F(OpSubOutTest, BoolOutputWithIntegralInput) {
  // op_sub_out() doesn't handle Bool outputs.
  TensorFactory<ScalarType::Bool> tf;
  TensorFactory<ScalarType::Int> tfi;

  const std::vector<int32_t> sizes = {2, 2};

  // Operands.
  Tensor a = tfi.make(sizes, /*data=*/{false, true, true, false});
  Tensor b = tfi.make(sizes, /*data=*/{2, 3, 4, 3});

  // Destination for the difference.
  Tensor out = tf.zeros(sizes);

  ET_EXPECT_KERNEL_FAILURE(context_, op_sub_out(a, b, /*alpha=*/1, out));
}

TEST_F(OpSubOutTest, MismatchedNonBroadcastableInputShapesDies) {
  TensorFactory<ScalarType::Int> tf;

  // Minuend and subtrahend with different, non-broadcastable shapes.
  Tensor a = tf.ones(/*sizes=*/{4, 2});
  Tensor b = tf.ones(/*sizes=*/{2, 2});

  // Destination for the subtraction; its shape doesn't match either input.
  Tensor out = tf.zeros(/*sizes=*/{8});

  // Performing subtraction on two mismatched tensors should cause an assertion
  // and kill the test process.
  ET_EXPECT_KERNEL_FAILURE(context_, op_sub_out(a, b, /*alpha=*/0, out));
}

TEST_F(OpSubOutTest, MismatchedOutputShapesDies) {
  if (SupportedFeatures::get()->output_resize) {
    GTEST_SKIP()
        << "The current kernel supports implicitly resizing output tensor";
  }

  TensorFactory<ScalarType::Int> tf;

  const std::vector<int32_t> sizes = {2, 2};

  // Minuend and subtrahend with the same shapes.
  Tensor a = tf.ones(sizes);
  Tensor b = tf.ones(sizes);

  // Destination with a different shape.
  Tensor out = tf.zeros(/*sizes=*/{4});

  // Subtracting two tensors into a mismatched output should cause an assertion
  // and kill the test process.
  ET_EXPECT_KERNEL_FAILURE(context_, op_sub_out(a, b, /*alpha=*/0, out));
}

TEST_F(OpSubOutTest, BroadcastDimSizeIsOneAB) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.20342785120010376,
       0.8211539387702942,
       0.12307500839233398,
       0.8268751502037048,
       0.6484894752502441,
       0.8079752326011658});
  Tensor y = tf.make({1, 2}, {0.22279858589172363, 0.3636378049850464});
  Tensor expected_result = tf.make(
      {3, 2},
      {-0.019370734691619873,
       0.4575161337852478,
       -0.09972357749938965,
       0.46323734521865845,
       0.4256908893585205,
       0.4443374276161194});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpSubOutTest, BroadcastDimSizeMissingAB) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.20342785120010376,
       0.8211539387702942,
       0.12307500839233398,
       0.8268751502037048,
       0.6484894752502441,
       0.8079752326011658});
  Tensor y = tf.make({2}, {0.22279858589172363, 0.3636378049850464});
  Tensor expected_result = tf.make(
      {3, 2},
      {-0.019370734691619873,
       0.4575161337852478,
       -0.09972357749938965,
       0.46323734521865845,
       0.4256908893585205,
       0.4443374276161194});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpSubOutTest, BroadcastDimSizeIsOneBA) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({1, 2}, {0.22279858589172363, 0.3636378049850464});
  Tensor y = tf.make(
      {3, 2},
      {0.20342785120010376,
       0.8211539387702942,
       0.12307500839233398,
       0.8268751502037048,
       0.6484894752502441,
       0.8079752326011658});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.019370734691619873,
       -0.4575161337852478,
       0.09972357749938965,
       -0.46323734521865845,
       -0.4256908893585205,
       -0.4443374276161194});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpSubOutTest, BroadcastDimSizeMissingBA) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({2}, {0.22279858589172363, 0.3636378049850464});
  Tensor y = tf.make(
      {3, 2},
      {0.20342785120010376,
       0.8211539387702942,
       0.12307500839233398,
       0.8268751502037048,
       0.6484894752502441,
       0.8079752326011658});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.019370734691619873,
       -0.4575161337852478,
       0.09972357749938965,
       -0.46323734521865845,
       -0.4256908893585205,
       -0.4443374276161194});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpSubOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.44215160608291626,
       0.17627692222595215,
       0.46265703439712524,
       0.04357701539993286,
       0.838569700717926,
       0.06833052635192871});
  Tensor y = tf.make(
      {3, 2},
      {0.06382524967193604,
       0.18627053499221802,
       0.5863531231880188,
       0.12181782722473145,
       0.5662856698036194,
       0.930520236492157});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.3783263564109802,
       -0.00999361276626587,
       -0.12369608879089355,
       -0.07824081182479858,
       0.27228403091430664,
       -0.8621897101402283});

  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

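// Same inputs as above, but out is allocated with a larger upper-bound shape.
// With DYNAMIC_BOUND dynamism the kernel is expected to resize out to {3, 2}.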
TEST_F(OpSubOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.44215160608291626,
       0.17627692222595215,
       0.46265703439712524,
       0.04357701539993286,
       0.838569700717926,
       0.06833052635192871});
  Tensor y = tf.make(
      {3, 2},
      {0.06382524967193604,
       0.18627053499221802,
       0.5863531231880188,
       0.12181782722473145,
       0.5662856698036194,
       0.930520236492157});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.3783263564109802,
       -0.00999361276626587,
       -0.12369608879089355,
       -0.07824081182479858,
       0.27228403091430664,
       -0.8621897101402283});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpSubOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.44215160608291626,
       0.17627692222595215,
       0.46265703439712524,
       0.04357701539993286,
       0.838569700717926,
       0.06833052635192871});
  Tensor y = tf.make(
      {3, 2},
      {0.06382524967193604,
       0.18627053499221802,
       0.5863531231880188,
       0.12181782722473145,
       0.5662856698036194,
       0.930520236492157});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.3783263564109802,
       -0.00999361276626587,
       -0.12369608879089355,
       -0.07824081182479858,
       0.27228403091430664,
       -0.8621897101402283});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_sub_out(x, y, 1, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

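// op_sub_scalar_out() computes out = self - alpha * other elementwise, so each
// element below is reduced by 1.5 * 0.5 = 0.75.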
TEST_F(OpSubScalarOutTest, SanityCheck) {
  TensorFactory<ScalarType::Int> tf_a;
  TensorFactory<ScalarType::Float> tf_out;

  const std::vector<int32_t> sizes = {2, 2};

  Tensor out = tf_out.zeros(sizes);

  op_sub_scalar_out(tf_a.make(sizes, {1, 2, 4, 8}), 0.5, /*alpha=*/1.5, out);

  // Check that it matches the expected output.
  EXPECT_TENSOR_EQ(out, tf_out.make(sizes, {0.25, 1.25, 3.25, 7.25}));
}

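// Same check with a non-trivial alpha: each element is reduced by
// 2.8 * 1.9 = 5.32.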
TEST_F(OpSubScalarOutTest, OptimizedSanityCheck) {
  TensorFactory<ScalarType::Float> tf;

  const std::vector<int32_t> sizes = {2, 2};

  Tensor out = tf.zeros(sizes);

  op_sub_scalar_out(
      tf.make(sizes, {6.3, 2.1, 5.6, 8.2}), 1.9, /*alpha=*/2.8, out);

  // Check that it matches the expected output.
  EXPECT_TENSOR_CLOSE(out, tf.make(sizes, {0.98, -3.22, 0.28, 2.88}));
}

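// Half-precision output with a float Scalar operand: 1 - 1 * (-1) = 2 for
// every element.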
TEST_F(OpSubScalarOutTest, DtypeTest_float16_float_int_float16) {
  torch::executor::testing::TensorFactory<exec_aten::ScalarType::Half> tfHalf;

  exec_aten::Tensor self = tfHalf.ones({2, 2});
  exec_aten::Scalar other = exec_aten::Scalar(-1.0);
  exec_aten::Scalar alpha = exec_aten::Scalar(1);
  exec_aten::Tensor out = tfHalf.zeros({2, 2});
  exec_aten::Tensor out_expected = tfHalf.full({2, 2}, 2.0);
  op_sub_scalar_out(self, other, alpha, out);
  EXPECT_TENSOR_CLOSE(out, out_expected);
}