/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ReorgLayer.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
template <typename T>
SimpleTensor<T> reorg_layer(const SimpleTensor<T> &src, int32_t stride)
{
    ARM_COMPUTE_ERROR_ON(src.shape().num_dimensions() > 4);
    ARM_COMPUTE_ERROR_ON(src.data_layout() != DataLayout::NCHW);

    TensorInfo        input_info(src.shape(), 1, src.data_type());
    const TensorShape output_shape = misc::shape_calculator::compute_reorg_output_shape(input_info, stride);

    // Create destination tensor
    SimpleTensor<T> dst{ output_shape, src.data_type() };

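    // Output dimensions: out_c recovers the input channel count (the output has
    // stride * stride times more channels), outer_dims is the number of batches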
    const unsigned int W          = dst.shape().x();
    const unsigned int H          = dst.shape().y();
    const unsigned int C          = dst.shape().z();
    const unsigned int out_c      = C / (stride * stride);
    const unsigned int outer_dims = dst.shape().total_size() / (W * H * C);

    // Calculate layer reorg in NCHW
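    // Each output element (w, h, c, b) is read from input element
    // (w * stride + offset % stride, h * stride + offset / stride, c % out_c, b),
    // where offset = c / out_c selects one of the stride * stride spatial phases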
    Coordinates map_coords;

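    // Parallelise over the outer (batch) dimension; map_coords is private so that
    // each thread updates its own coordinate object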
#if defined(_OPENMP)
    #pragma omp parallel for private(map_coords)
#endif /* _OPENMP */
    for(unsigned int b = 0; b < outer_dims; ++b)
    {
        map_coords.set(3, b);
        for(unsigned int c = 0; c < C; ++c)
        {
            map_coords.set(2, c % out_c);
            const unsigned int offset = c / out_c;
            for(unsigned int h = 0; h < H; ++h)
            {
                map_coords.set(1, h * stride + offset / stride);
                for(unsigned int w = 0; w < W; ++w)
                {
                    const unsigned int dst_idx = w + W * (h + H * (c + C * b));
                    map_coords.set(0, w * stride + offset % stride);
                    dst[dst_idx] = *reinterpret_cast<const T *>(src(map_coords));
                }
            }
        }
    }

    return dst;
}

template SimpleTensor<int32_t> reorg_layer(const SimpleTensor<int32_t> &src, int32_t stride);
template SimpleTensor<int16_t> reorg_layer(const SimpleTensor<int16_t> &src, int32_t stride);
template SimpleTensor<int8_t> reorg_layer(const SimpleTensor<int8_t> &src, int32_t stride);
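
// A minimal usage sketch (hypothetical, not part of this file): with a 4x4x4
// NCHW input and stride 2, compute_reorg_output_shape halves W and H and
// multiplies the channel count by stride * stride, giving a 2x2x16 output:
//
//     SimpleTensor<int32_t> src{ TensorShape(4U, 4U, 4U), DataType::S32 };
//     // ... fill src with test values ...
//     const SimpleTensor<int32_t> dst = reference::reorg_layer(src, 2);
//     // dst.shape() == TensorShape(2U, 2U, 16U)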
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute