// xref: /aosp_15_r20/external/executorch/kernels/test/op_select_copy_test.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>
#include <sys/types.h>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpSelectCopyIntOutTest : public OperatorTest {
 protected:
  Tensor& op_select_copy_int_out(
      const Tensor& self,
      int64_t dim,
      int64_t index,
      Tensor& out) {
    return torch::executor::aten::select_copy_outf(
        context_, self, dim, index, out);
  }

  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;

    // Based on the select definition, selecting any dim()=3, size(1)=2 tensor
    // x along dim 1 at indices 0 and 1 yields two tensors [ret_0, ret_1]
    // equal to x[:, 0, :] and x[:, 1, :], i.e. x[i, 0, j] = ret_0[i, j] and
    // x[i, 1, j] = ret_1[i, j] for any i in [-x.size(0), x.size(0)) and j in
    // [-x.size(2), x.size(2)).
    // Therefore we design the following tensor x to make the test easy to
    // verify: it is formed by stacking ones(3, 4) and zeros(3, 4) along dim 1.
    // Selecting along dim 1 by the above rule, ret_0 should be ones(3, 4) and
    // ret_1 should be zeros(3, 4).

    // clang-format off
    Tensor x = tf.make(
        {3, 2, 4},
        {
          // All ones rows below form x[:, 0, :] (ones(3, 4));
          // all zeros rows form x[:, 1, :] (zeros(3, 4)).
          // [0, :, :]
          1, 1, 1, 1, // [0, 0, :]
          0, 0, 0, 0, // [0, 1, :]

          // [1, :, :]
          1, 1, 1, 1, // [1, 0, :]
          0, 0, 0, 0, // [1, 1, :]

          // [2, :, :]
          1, 1, 1, 1, // [2, 0, :]
          0, 0, 0, 0, // [2, 1, :]
        });
    // clang-format on

    // Based on the rules above, the expected values of out_0 and ret_0 after
    // the test are all ones(3, 4). So here we deliberately initialize out_0 to
    // zeros(3, 4), to rule out any influence of the initial value on the final
    // result. Same for out_1 and ret_1.

    Tensor out_0 = tf.zeros({3, 4});
    Tensor out_1 = tf.ones({3, 4});
    Tensor ret_0 = op_select_copy_int_out(x, /*dim=*/1, /*index=*/0, out_0);
    Tensor ret_1 = op_select_copy_int_out(x, /*dim=*/1, /*index=*/1, out_1);

    EXPECT_TENSOR_EQ(ret_0, out_0);
    EXPECT_TENSOR_EQ(ret_1, out_1);

    EXPECT_TENSOR_EQ(ret_0, tf.ones({3, 4}));
    EXPECT_TENSOR_EQ(ret_1, tf.zeros({3, 4}));
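
    // Spot-check the element-level relationship described above: a minimal
    // illustrative check (assuming Tensor::const_data_ptr<CTYPE>(), which both
    // kernel modes provide). Flat index 0 of ret_0 is ret_0[0, 0], and flat
    // index 0 of x is x[0, 0, 0].
    EXPECT_EQ(ret_0.const_data_ptr<CTYPE>()[0], x.const_data_ptr<CTYPE>()[0]);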
  }

  // Run the test by selecting Tensor x on the given dim at every available
  // index on that dimension.
  void run_test_cases(
      const Tensor& x,
      ssize_t dim,
      const std::vector<Tensor>& expected) {
    // Generate an out tensor sharing the same size and dtype as the expected
    // tensors.
    TensorFactory<ScalarType::Double> tf;

    const std::vector<int32_t> out_size(
        expected[0].sizes().begin(), expected[0].sizes().end());
    Tensor out = tf.ones(out_size);

    for (ssize_t idx = 0; idx < x.size(dim); idx++) {
      // The operator should always return the provided out Tensor, and its
      // contents should meet the expectation.
      Tensor ret = op_select_copy_int_out(x, dim, idx, out);
      EXPECT_TENSOR_EQ(out, ret);
      EXPECT_TENSOR_EQ(out, expected[idx]);

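      // Also exercise the negative alias of idx: an index of idx - x.size(dim)
      // is negative and selects the same slice as idx.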
      ret = op_select_copy_int_out(x, dim, /*index=*/idx - x.size(dim), out);
      EXPECT_TENSOR_EQ(out, ret);
      EXPECT_TENSOR_EQ(out, expected[idx]);
    }
  }
};

TEST_F(OpSelectCopyIntOutTest, SelectFrontDimAllIndexes) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor x = tf.make(
      {2, 3, 4},
      {
          // [0, :, :]
          1.,   2.,   3.,   4., // [0, 0, :]
          5.,   6.,   7.,   8., // [0, 1, :]
          9.,  10.,  11.,  12., // [0, 2, :]

          // [1, :, :]
         -1.,  -2.,  -3.,  -4., // [1, 0, :]
         -5.,  -6.,  -7.,  -8., // [1, 1, :]
         -9., -10., -11., -12., // [1, 2, :]
      });
  // clang-format on

  // Select from the front (0th) dimension of the input.
  // The size of the output tensor should follow these rules (see the sketch
  // right below):
  // - output.size(i) shall equal input.size(i) if i < dim,
  // - output.size(i) shall equal input.size(i+1) if i >= dim
  const std::vector<int32_t> out_size = {3, 4};
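
  // A minimal illustrative sketch (hypothetical helper name, not part of the
  // kernel API): derive the expected out shape from the rules above by
  // dropping the selected dimension.
  auto expected_out_size_for = [](const Tensor& in, int64_t dim) {
    std::vector<int32_t> sizes;
    for (ssize_t i = 0; i < in.dim(); ++i) {
      if (i != dim) {
        // Keep every dimension except the selected one.
        sizes.push_back(static_cast<int32_t>(in.size(i)));
      }
    }
    return sizes;
  };
  // {2, 3, 4} with dim=0 drops the leading 2, leaving {3, 4}.
  EXPECT_EQ(expected_out_size_for(x, /*dim=*/0), out_size);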

  Tensor out = tf.zeros(out_size);

  // clang-format off
  std::vector<Tensor> expected_rets = {
    // Expected result when choosing from the 0th dimension and 0th index.
    // The result should equal x[0, :, :]
    tf.make(
      out_size,
      {
        1.,   2.,   3.,   4., // [0, :]
        5.,   6.,   7.,   8., // [1, :]
        9.,  10.,  11.,  12., // [2, :]
      }),

    // Expected result when choosing from the 0th dimension and 1st index.
    // The result should equal x[1, :, :]
    tf.make(
      out_size,
      {
        -1.,  -2.,  -3.,  -4., // [0, :]
        -5.,  -6.,  -7.,  -8., // [1, :]
        -9., -10., -11., -12., // [2, :]
      })
  };
  // clang-format on

  run_test_cases(x, /*dim=*/0, expected_rets);
}

TEST_F(OpSelectCopyIntOutTest, SelectMiddleDimAllIndexes) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor x = tf.make(
      {2, 3, 4},
      {
          // [0, :, :]
          1.,   2.,   3.,   4., // [0, 0, :]
          5.,   6.,   7.,   8., // [0, 1, :]
          9.,  10.,  11.,  12., // [0, 2, :]

          // [1, :, :]
         -1.,  -2.,  -3.,  -4., // [1, 0, :]
         -5.,  -6.,  -7.,  -8., // [1, 1, :]
         -9., -10., -11., -12., // [1, 2, :]
      });
  // clang-format on

  // Select from the middle (1st) dimension of the input.
  // The size of the output tensor should follow these rules:
  // - output.size(i) shall equal input.size(i) if i < dim,
  // - output.size(i) shall equal input.size(i+1) if i >= dim
  const std::vector<int32_t> out_size = {2, 4};

  Tensor out = tf.zeros(out_size);

  // clang-format off
  std::vector<Tensor> expected_rets = {
    // Expected result when choosing from the 1st dimension and 0th index.
    // The result should equal x[:, 0, :]
    tf.make(
      out_size,
      {
         1.,   2.,   3.,   4., // [0, :]
        -1.,  -2.,  -3.,  -4., // [1, :]
      }),
    // Expected result when choosing from the 1st dimension and 1st index.
    // The result should equal x[:, 1, :]
    tf.make(
      out_size,
      {
         5.,   6.,   7.,   8., // [0, :]
        -5.,  -6.,  -7.,  -8., // [1, :]
      }),
    // Expected result when choosing from the 1st dimension and 2nd index.
    // The result should equal x[:, 2, :]
    tf.make(
      out_size,
      {
         9.,  10.,  11.,  12., // [0, :]
        -9., -10., -11., -12., // [1, :]
      })
  };
  // clang-format on

  run_test_cases(x, /*dim=*/1, expected_rets);
}

TEST_F(OpSelectCopyIntOutTest, SelectEndDimAllIndexes) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor x = tf.make(
    {2, 3, 4},
    {
      // [0, :, :]
      1.,   2.,   3.,   4., // [0, 0, :]
      5.,   6.,   7.,   8., // [0, 1, :]
      9.,  10.,  11.,  12., // [0, 2, :]

      // [1, :, :]
      -1.,  -2.,  -3.,  -4., // [1, 0, :]
      -5.,  -6.,  -7.,  -8., // [1, 1, :]
      -9., -10., -11., -12., // [1, 2, :]
    });
  // clang-format on

  // Select from the last (2nd) dimension of the input.
  // The size of the output tensor should follow these rules:
  // - output.size(i) shall equal input.size(i) if i < dim,
  // - output.size(i) shall equal input.size(i+1) if i >= dim
  const std::vector<int32_t> out_size = {2, 3};

  Tensor out = tf.zeros(out_size);

  // clang-format off
  std::vector<Tensor> expected_rets = {
    // Expected result when choosing from the 2nd dimension and 0th index.
    // The result should equal x[:, :, 0] (a.k.a. 0th column of x data layout)
    tf.make(
      out_size,
      {
         1.,  5.,  9.,  // [0, :]
        -1., -5., -9.,  // [1, :]
      }),
    // Expected result when choosing from the 2nd dimension and 1st index.
    // The result should equal x[:, :, 1] (a.k.a. 1st column of x data layout)
    tf.make(
      out_size,
      {
         2.,  6.,  10.,  // [0, :]
        -2., -6., -10.,  // [1, :]
      }),
    // Expected result when choosing from the 2nd dimension and 2nd index.
    // The result should equal x[:, :, 2] (a.k.a. 2nd column of x data layout)
    tf.make(
      out_size,
      {
         3.,  7.,  11.,  // [0, :]
        -3., -7., -11.,  // [1, :]
      }),
    // Expected result when choosing from the 2nd dimension and 3rd index.
    // The result should equal x[:, :, 3] (a.k.a. 3rd column of x data layout)
    tf.make(
      out_size,
      {
         4.,  8.,  12.,  // [0, :]
        -4., -8., -12.,  // [1, :]
      })
  };
  // clang-format on

  run_test_cases(x, /*dim=*/2, expected_rets);
}

/// A generic smoke test that works for any dtype that supports ones() and
/// zeros().
TEST_F(OpSelectCopyIntOutTest, AllDtypesSupported) {
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
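  // For each supported dtype, TEST_ENTRY above expands to a call such as
  // test_dtype<int32_t, ScalarType::Int>() (illustrative expansion).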
  // TODO: Also add tests for half, complex, quantized, and other types. Easiest
  // way to do that would be to make TensorFactory support zeros() and ones()
  // for those types.
}

//////////////////////////////////////////////////////////////////////////////
// The following tests focus on empty-size tensors and empty tensors.
// First we define the terms:
// empty-size tensor: size is [] but it does have data (e.g. tensor(5))
// empty tensor: size is not [] but at least one dim has size zero, so it
// contains no data (e.g. ones(1, 0, 2, 3))
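//
// Illustrative sketch (comment only; hypothetical variable names, using the
// same TensorFactory calls exercised by the tests below):
//   TensorFactory<ScalarType::Int> tf;
//   Tensor empty_size = tf.make({}, {5});     // size [], numel() == 1
//   Tensor empty = tf.make({1, 0, 2, 3}, {}); // zero-size dim, numel() == 0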

// In this test we are going to check whether our select function supports a
// vector tensor input and an empty-size tensor output. Such a combination is
// quite normal in the real world (e.g. select(torch.range(10), 0, 5, out) ==
// tensor(5)).
TEST_F(OpSelectCopyIntOutTest, VectorInputSupported) {
  TensorFactory<ScalarType::Int> tf;

  Tensor x = tf.make({10}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});

  // Make an empty-size out tensor and demonstrate that it has data.
  Tensor out = tf.make({}, {0});
  EXPECT_EQ(out.numel(), 1);

  // Pass the empty-size tensor to the function.
  Tensor expect = tf.make({}, {5});
  op_select_copy_int_out(x, /*dim=*/0, /*index=*/5, out);
  EXPECT_TENSOR_EQ(out, expect);
}

// This test focuses on support for empty tensor (dim() > 0) input and empty
// tensor output.
TEST_F(OpSelectCopyIntOutTest, EmptyTensorNonZeroNDimsInputSupported) {
  TensorFactory<ScalarType::Int> tf;

  // Use an empty tensor as input.
  Tensor x = tf.make({3, 0, 10, 3}, {});
  EXPECT_EQ(x.numel(), 0);

  // Output whose shape is appropriate for selecting along dim(2).
  Tensor out = tf.make({3, 0, 3}, {});
  EXPECT_EQ(out.numel(), 0);

  Tensor ret = op_select_copy_int_out(x, /*dim=*/2, /*index=*/3, out);
  EXPECT_EQ(ret.numel(), 0);
  // Success if it doesn't assert on the weird-shaped empty input and the
  // returned tensor is still empty.
}

// Apply select to an empty tensor input (a single dim of size 0) with an
// empty-size tensor output.
TEST_F(OpSelectCopyIntOutTest, EmptyTensorZeroNDimsInputDies) {
  TensorFactory<ScalarType::Int> tf;

  // Use an empty tensor as input.
  Tensor x = tf.make({0}, {});
  EXPECT_EQ(x.numel(), 0);

  // Output whose shape is appropriate for selecting along dim(0).
  Tensor out = tf.make({}, {0});
  EXPECT_EQ(out.numel(), 1);

  // Expect failure when selecting along a dimension of length 0, since there
  // is nothing on that dimension to select (index out of bounds).
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_select_copy_int_out(x, /*dim=*/0, /*index=*/0, out));
}
///////////////////////////////////////////////////////////////////////

TEST_F(OpSelectCopyIntOutTest, DimOutOfBoundDies) {
  TensorFactory<ScalarType::Int> tf;

  Tensor x = tf.ones({1, 1, 1});
  Tensor out = tf.zeros({1, 1});

  // Some invalid dim values.
  const std::vector<int32_t> invalid_dims = {3, 4, 5, -4, -5, -6};
  for (ssize_t dim : invalid_dims) {
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_select_copy_int_out(x, dim, /*index=*/0, out));
  }
}

TEST_F(OpSelectCopyIntOutTest, MismatchedDtypesDies) {
  TensorFactory<ScalarType::Int> tf_int;
  TensorFactory<ScalarType::Float> tf_float;
  Tensor x = tf_int.zeros({1, 2, 2});

  // Size is compatible with the expected output, but the dtype doesn't match.
  Tensor out = tf_float.ones({2, 2});

  ET_EXPECT_KERNEL_FAILURE(
      context_, op_select_copy_int_out(x, /*dim=*/0, /*index=*/0, out));
}

TEST_F(OpSelectCopyIntOutTest, OutMatchNumelLackDimAtEndDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor x = tf.zeros({1, 2, 2, 1});

  // Out shares the same dtype and numel as the expected output, but has a
  // mismatched size (out.dim() should always be one lower than x.dim()).
  Tensor out = tf.ones({2, 2});

  ET_EXPECT_KERNEL_FAILURE(
      context_, op_select_copy_int_out(x, /*dim=*/0, /*index=*/0, out));
}

TEST_F(OpSelectCopyIntOutTest, OutMatchNumelExtraDimAtFrontDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor x = tf.zeros({2, 2});

  // Out shares the same dtype and numel as the expected output, but has a
  // mismatched size (out.dim() should always be one lower than x.dim()).
  Tensor out = tf.ones({1, 2});

  ET_EXPECT_KERNEL_FAILURE(
      context_, op_select_copy_int_out(x, /*dim=*/0, /*index=*/0, out));
}

TEST_F(OpSelectCopyIntOutTest, OutSizeMismatchDimDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;

  Tensor x = tf.zeros({2, 4, 7, 5});

  // Should be {2, 4, 5} to match x when calling select() with dim=2.
  Tensor out = tf.zeros({2, 4, 7});

  ET_EXPECT_KERNEL_FAILURE(
      context_, op_select_copy_int_out(x, /*dim=*/2, /*index=*/3, out));
}

/* %python
import torch
torch.manual_seed(0)
x = torch.rand(2, 3, 4)
res = torch.select(x, 1, 2)
op = "op_select_copy_int_out"
opt_extra_params = "1, 2,"
dtype = "ScalarType::Float"
check = "EXPECT_TENSOR_EQ" */
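// (Illustrative note: the %python blocks in this file are test-generation
// metadata; the hard-coded inputs and expected values in the dynamic-shape
// tests below correspond to evaluating torch.select(x, 1, 2) on the seeded
// random input above.)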

TEST_F(OpSelectCopyIntOutTest, DynamicShapeUpperBoundSameAsExpected) {
  /* %python
  out_args = "{2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 3, 4},
      {0.49625658988952637,  0.7682217955589294,  0.08847743272781372,
       0.13203048706054688,  0.30742281675338745, 0.6340786814689636,
       0.4900934100151062,   0.8964447379112244,  0.455627977848053,
       0.6323062777519226,   0.3488934636116028,  0.40171730518341064,
       0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
       0.518521785736084,    0.6976675987243652,  0.800011396408081,
       0.16102945804595947,  0.28226858377456665, 0.6816085577011108,
       0.9151939749717712,   0.39709991216659546, 0.8741558790206909});
  Tensor expected = tf.make(
      {2, 4},
      {0.455627977848053,
       0.6323062777519226,
       0.3488934636116028,
       0.40171730518341064,
       0.6816085577011108,
       0.9151939749717712,
       0.39709991216659546,
       0.8741558790206909});

  Tensor out =
      tf.zeros({2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_select_copy_int_out(x, 1, 2, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpSelectCopyIntOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 3, 4},
      {0.49625658988952637,  0.7682217955589294,  0.08847743272781372,
       0.13203048706054688,  0.30742281675338745, 0.6340786814689636,
       0.4900934100151062,   0.8964447379112244,  0.455627977848053,
       0.6323062777519226,   0.3488934636116028,  0.40171730518341064,
       0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
       0.518521785736084,    0.6976675987243652,  0.800011396408081,
       0.16102945804595947,  0.28226858377456665, 0.6816085577011108,
       0.9151939749717712,   0.39709991216659546, 0.8741558790206909});
  Tensor expected = tf.make(
      {2, 4},
      {0.455627977848053,
       0.6323062777519226,
       0.3488934636116028,
       0.40171730518341064,
       0.6816085577011108,
       0.9151939749717712,
       0.39709991216659546,
       0.8741558790206909});

  Tensor out =
      tf.zeros({5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_select_copy_int_out(x, 1, 2, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpSelectCopyIntOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 3, 4},
      {0.49625658988952637,  0.7682217955589294,  0.08847743272781372,
       0.13203048706054688,  0.30742281675338745, 0.6340786814689636,
       0.4900934100151062,   0.8964447379112244,  0.455627977848053,
       0.6323062777519226,   0.3488934636116028,  0.40171730518341064,
       0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
       0.518521785736084,    0.6976675987243652,  0.800011396408081,
       0.16102945804595947,  0.28226858377456665, 0.6816085577011108,
       0.9151939749717712,   0.39709991216659546, 0.8741558790206909});
  Tensor expected = tf.make(
      {2, 4},
      {0.455627977848053,
       0.6323062777519226,
       0.3488934636116028,
       0.40171730518341064,
       0.6816085577011108,
       0.9151939749717712,
       0.39709991216659546,
       0.8741558790206909});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_select_copy_int_out(x, 1, 2, out);
  EXPECT_TENSOR_EQ(out, expected);
}
559