/*
 * Copyright (c) 2017-2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once
#include <cstdlib>
#include <random>

#include "alloc.hpp"

enum TensorOrder
{
  NHWC,  ///< [Batch x Height x Width x Channels]
  NCHW,  ///< [Batch x Channels x Height x Width]
};

struct Tensor4DShape
{
  int n_batches, n_rows, n_cols, n_channels;
  TensorOrder ordering;

  // Create a new tensor shape with the default (NHWC) ordering
  inline Tensor4DShape(
    const int n_batches,
    const int n_rows,
    const int n_cols,
    const int n_channels,
    const TensorOrder ordering=NHWC
  ) : n_batches(n_batches),
      n_rows(n_rows),
      n_cols(n_cols),
      n_channels(n_channels),
      ordering(ordering)
  {
  }

  // Compute the linear offset of element (n, i, j, c) under this shape's ordering
  inline int index(const int n, const int i, const int j, const int c) const
  {
    if (this->ordering == NHWC)
    {
      return ((n*this->n_rows + i)*this->n_cols + j)*this->n_channels + c;
    }
    else  // NCHW
    {
      return ((n*this->n_channels + c)*this->n_rows + i)*this->n_cols + j;
    }
  }

  // Total number of elements described by this shape
  inline int size() const
  {
    return n_batches * n_rows * n_cols * n_channels;
  }

  inline bool TestEq(const Tensor4DShape& other) const
  {
    return (n_batches == other.n_batches &&
            n_rows == other.n_rows &&
            n_cols == other.n_cols &&
            n_channels == other.n_channels);
  }
};


enum WeightOrder
{
  HWIO,  ///< [Height x Width x Input channels x Output channels]
  OIHW,  ///< [Output channels x Input channels x Height x Width]
};

struct KernelShape
{
  int n_output_channels, n_rows, n_cols, n_input_channels;
  WeightOrder ordering;

  // Create a new kernel shape with the default (HWIO) ordering
  inline KernelShape(
    const int n_output_channels,
    const int n_rows,
    const int n_cols,
    const int n_input_channels,
    const WeightOrder ordering=HWIO
  ) : n_output_channels(n_output_channels),
      n_rows(n_rows),
      n_cols(n_cols),
      n_input_channels(n_input_channels),
      ordering(ordering)
  {
  }

  // Compute the linear offset of weight (oc, i, j, ic) under this shape's ordering
  inline int index(int oc, int i, int j, int ic) const
  {
    if (this->ordering == HWIO)
    {
      return ((i*this->n_cols + j)*this->n_input_channels + ic)*this->n_output_channels + oc;
    }
    else  // OIHW
    {
      return ((oc*this->n_input_channels + ic)*this->n_rows + i)*this->n_cols + j;
    }
  }

  // Total number of weights described by this shape
  inline int size(void) const
  {
    return n_output_channels * n_rows * n_cols * n_input_channels;
  }
};


/* A 4D tensor whose backing storage is allocated via ALLOCATE on construction
 * and zero-initialised. Copying is disallowed; the storage is released in the
 * destructor.
 */
template <typename ShapeT, typename T>
class Tensor4D final
{
  public:
    Tensor4D(ShapeT shape) :
      shape(shape),
      _data(reinterpret_cast<T*>(ALLOCATE(size_bytes())))
    {
      Clear();
    }

    Tensor4D(const Tensor4D<ShapeT, T>&) = delete;
    Tensor4D operator=(const Tensor4D<ShapeT, T>&) = delete;

    ~Tensor4D() {
      free(_data);
    }

    inline T* ptr() const {
      return _data;
    }

    inline size_t size_bytes() const {
      return shape.size() * sizeof(T);
    }

    /* Extract an element of the tensor.
     *
     * If the shape is a Tensor4DShape then the index is given as batch, row,
     * column and channel. If the shape is a KernelShape then the index is
     * given as output channel, row, column and input channel.
     */
    inline T& element(const int a, const int b, const int c, const int d) const
    {
      return _data[shape.index(a, b, c, d)];
    }

    inline void Clear() {
      Fill(static_cast<T>(0));
    }

    inline void Fill(T val) {
      for (int i = 0; i < shape.size(); i++)
        _data[i] = val;
    }

    const ShapeT shape;

  private:
    T* const _data;
};
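
/* Illustrative usage: a minimal sketch using only the types defined above;
 * it is not part of the library itself and the shapes chosen are arbitrary.
 *
 *   // Describe a 1x8x8x16 NHWC activation tensor and allocate zeroed storage.
 *   Tensor4DShape shape(1, 8, 8, 16);
 *   Tensor4D<Tensor4DShape, float> tensor(shape);
 *
 *   // element(n, i, j, c) maps through shape.index() to a linear offset.
 *   tensor.element(0, 3, 4, 7) = 1.0f;
 *
 *   // Kernel weights are indexed as (output channel, row, column, input channel).
 *   KernelShape kshape(32, 3, 3, 16);
 *   Tensor4D<KernelShape, float> weights(kshape);
 *   weights.element(0, 1, 1, 0) = 0.5f;
 */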