/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "helpers.h"
#include "tile_helpers.h"

#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)

// Check valid VEC_SIZES
#if VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
#error "Only vector sizes 1, 2, 3, 4, 8 and 16 are supported"
#endif // VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16

#define DIV_MOD_UINT(x, y, div_res, mod_res)                    \
    ({                                                          \
        div_res = (uint)((x) * (float)(1.0f / (float)(y)));     \
        uint r  = div_res * (y);                                \
        mod_res = (x) - r;                                      \
    })
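
// DIV_MOD_UINT computes both the quotient and the remainder of x / y in one go,
// using a float reciprocal rather than an integer division. Worked example (the
// numbers below are purely illustrative): DIV_MOD_UINT(11, 4, q, r) gives q = 2
// and r = 3, since (uint)(11 * 0.25f) = 2 and 11 - 2 * 4 = 3. The float reciprocal
// is assumed to be precise enough for the ranges used by this kernel (global ids
// and tensor dimensions).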

#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_DIM_X)

/** Performs channel shuffle when the data layout is NHWC. See https://arxiv.org/pdf/1707.01083.pdf for details.
 *
 * @note The vector size must be given as a preprocessor argument using -DVEC_SIZE=num. e.g. -DVEC_SIZE=4
 * @note The third dimension of the tensor must be given as a preprocessor argument using -DSRC_DIM_Z=num. e.g. -DSRC_DIM_Z=64
 * @note The first dimension of the tensor must be given as a preprocessor argument using -DSRC_DIM_X=num. e.g. -DSRC_DIM_X=64
 * @note The number of groups must be given as a preprocessor argument using -DNUM_GROUPS=num_groups. e.g. -DNUM_GROUPS=2
 * @note The number of channels in each group must be given as a preprocessor argument using -DK=num. e.g. -DK=1
 *       K is equal to num_channels / num_groups.
 * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER=x_dimension % VEC_SIZE. e.g. -DVEC_SIZE_LEFTOVER=1
 *
 * @param[in]  src_ptr                           Pointer to the source tensor. Supported data types: All
 * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  src_stride_w                      Stride of the source tensor in W dimension (in bytes)
 * @param[in]  src_step_w                        src_stride_w * number of elements along W processed per workitem (in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                        dst_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_step_w                        dst_stride_w * number of elements along W processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 */
__kernel void channel_shuffle_nhwc(TENSOR4D_DECLARATION(src),
                                   TENSOR4D_DECLARATION(dst))
{
    // Offset computation
    const uint curr_out_channel = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER); // output feature map

    uint z        = 0;
    uint batch_id = 0;
    // Compute batch_id and z from the collapsed third global dimension
    DIV_MOD_UINT(get_global_id(2), (uint)SRC_DIM_Z, batch_id, z);

    VEC_DATA_TYPE(uint, VEC_SIZE)
    curr_out_channels = (VEC_DATA_TYPE(uint, VEC_SIZE))(curr_out_channel) + VEC_OFFS(uint, VEC_SIZE);

    VEC_DATA_TYPE(uint, VEC_SIZE)
    in_channels = (curr_out_channels * (VEC_DATA_TYPE(uint, VEC_SIZE))(K)) % (VEC_DATA_TYPE(uint, VEC_SIZE))(SRC_DIM_X) + (curr_out_channels / (VEC_DATA_TYPE(uint, VEC_SIZE))(NUM_GROUPS));
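
    // Worked example (illustrative values only): with NUM_GROUPS = 2, K = 3 and SRC_DIM_X = 6,
    // in_channel = (out_channel * K) % SRC_DIM_X + out_channel / NUM_GROUPS, so output
    // channels 0..5 read input channels 0, 3, 1, 4, 2, 5.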

    // Load the values
    const __global DATA_TYPE *input_ptr = (const __global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * src_stride_y + z * src_stride_z + batch_id * src_stride_w);

#if VEC_SIZE == 1
    DATA_TYPE out0 = *(input_ptr + in_channels);
#elif VEC_SIZE == 2
    VEC_DATA_TYPE(DATA_TYPE, 2)
    out0 =
    {
        *(input_ptr + in_channels.s0),
        *(input_ptr + in_channels.s1)
    };
#elif VEC_SIZE == 3
    VEC_DATA_TYPE(DATA_TYPE, 3)
    out0 =
    {
        *(input_ptr + in_channels.s0),
        *(input_ptr + in_channels.s1),
        *(input_ptr + in_channels.s2)
    };
#elif VEC_SIZE == 4
    VEC_DATA_TYPE(DATA_TYPE, 4)
    out0 =
    {
        *(input_ptr + in_channels.s0),
        *(input_ptr + in_channels.s1),
        *(input_ptr + in_channels.s2),
        *(input_ptr + in_channels.s3)
    };
#elif VEC_SIZE == 8
    VEC_DATA_TYPE(DATA_TYPE, 8)
    out0 =
    {
        *(input_ptr + in_channels.s0),
        *(input_ptr + in_channels.s1),
        *(input_ptr + in_channels.s2),
        *(input_ptr + in_channels.s3),
        *(input_ptr + in_channels.s4),
        *(input_ptr + in_channels.s5),
        *(input_ptr + in_channels.s6),
        *(input_ptr + in_channels.s7)
    };
#elif VEC_SIZE == 16
    VEC_DATA_TYPE(DATA_TYPE, 16)
    out0 =
    {
        *(input_ptr + in_channels.s0),
        *(input_ptr + in_channels.s1),
        *(input_ptr + in_channels.s2),
        *(input_ptr + in_channels.s3),
        *(input_ptr + in_channels.s4),
        *(input_ptr + in_channels.s5),
        *(input_ptr + in_channels.s6),
        *(input_ptr + in_channels.s7),
        *(input_ptr + in_channels.s8),
        *(input_ptr + in_channels.s9),
        *(input_ptr + in_channels.sa),
        *(input_ptr + in_channels.sb),
        *(input_ptr + in_channels.sc),
        *(input_ptr + in_channels.sd),
        *(input_ptr + in_channels.se),
        *(input_ptr + in_channels.sf)
    };
#endif // VEC_SIZE == 1

    __global uchar *output_ptr = dst_ptr + curr_out_channel * sizeof(DATA_TYPE) + dst_offset_first_element_in_bytes + get_global_id(1) * dst_stride_y + z * dst_stride_z + batch_id * dst_stride_w;
    STORE_VECTOR_SELECT(out, DATA_TYPE, output_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_DIM_X)
#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)