/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_PIXEL_WISE_MULTIPLICATION_FIXTURE
#define ARM_COMPUTE_TEST_PIXEL_WISE_MULTIPLICATION_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/PixelWiseMultiplication.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
class PixelWiseMultiplicationGenericValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(const TensorShape &shape0,
               const TensorShape &shape1,
               DataType dt_in1,
               DataType dt_in2,
               DataType dt_out,
               float scale,
               ConvertPolicy convert_policy,
               RoundingPolicy rounding_policy,
               QuantizationInfo qinfo0,
               QuantizationInfo qinfo1,
               QuantizationInfo qinfo_out,
               ActivationLayerInfo act_info,
               bool is_inplace)
    {
        _is_inplace = is_inplace;
        _target     = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
        _reference  = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, unsigned int seed_offset)
    {
        library->fill_tensor_uniform(tensor, seed_offset);
    }

    TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
                              float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
                              QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
    {
        // Create tensors
        const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
        TensorType        src1      = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0);
        TensorType        src2      = create_tensor<TensorType>(shape1, dt_in2, 1, qinfo1);
        TensorType        dst       = create_tensor<TensorType>(out_shape, dt_out, 1, qinfo_out);

        // Check whether to do the computation in place and whether the inputs are broadcast compatible
        TensorType *actual_dst = &dst;
        if(_is_inplace)
        {
            bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (dt_in1 == dt_out);
            bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (dt_in2 == dt_out);
            bool do_in_place     = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
            ARM_COMPUTE_ASSERT(do_in_place);

            if(src1_is_inplace)
            {
                actual_dst = &src1;
            }
            else
            {
                actual_dst = &src2;
            }
        }

        auto allocate_tensor = [](TensorType &t)
        {
            ARM_COMPUTE_ASSERT(t.info()->is_resizable());
            t.allocator()->allocate();
            ARM_COMPUTE_ASSERT(!t.info()->is_resizable());
        };

        // Create and configure function
        FunctionType multiply;
        multiply.configure(&src1, &src2, actual_dst, scale, convert_policy, rounding_policy, act_info);

        allocate_tensor(src1);
        allocate_tensor(src2);

        // If not computing in place, we still need to allocate the original dst
        if(!_is_inplace)
        {
            allocate_tensor(dst);
        }

        // Fill tensors
        fill(AccessorType(src1), 0);
        fill(AccessorType(src2), 1);

        // Compute function
        multiply.run();

        return std::move(*actual_dst);
    }

    SimpleTensor<T3> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
                                       float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
                                       QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T1> src1{ shape0, dt_in1, 1, qinfo0 };
        SimpleTensor<T2> src2{ shape1, dt_in2, 1, qinfo1 };

        // Fill reference
        fill(src1, 0);
        fill(src2, 1);

        auto result = reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, qinfo_out);
        return act_info.enabled() ? reference::activation_layer(result, act_info, qinfo_out) : result;
    }

    TensorType       _target{};
    SimpleTensor<T3> _reference{};
    bool             _is_inplace{ false };
};
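
// Illustrative usage sketch (not part of this header): validation suites typically
// alias one of the fixtures below for a concrete backend and drive it through the
// test framework's FIXTURE_DATA_TEST_CASE macro. The type alias, test name and the
// dataset placeholder here are hypothetical examples only.
//
//   using NEPixelWiseMultiplicationToF32Fixture =
//       PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, float, float>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEPixelWiseMultiplicationToF32Fixture, framework::DatasetMode::ALL,
//                          /* dataset supplying shape, data types, scale, policies and the in-place flag */)
//   {
//       validate(Accessor(_target), _reference);
//   }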

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
class PixelWiseMultiplicationValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
    template <typename...>
    void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(
            shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
            QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
class PixelWiseMultiplicationBroadcastValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
    template <typename...>
    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
               bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(
            shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
            QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class PixelWiseMultiplicationValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
    template <typename...>
    void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(
            shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
            QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class PixelWiseMultiplicationValidationIntegerFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
    template <typename...>
    void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(
            shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
            QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class PixelWiseMultiplicationBroadcastValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
    template <typename...>
    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
               ActivationLayerInfo act_info, bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(
            shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
            QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
class PixelWiseMultiplicationValidationQuantizedFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
    template <typename...>
    void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(
            shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
            qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
class PixelWiseMultiplicationBroadcastValidationQuantizedFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
    template <typename...>
    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
    {
        PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(
            shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
            qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
    }
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_PIXEL_WISE_MULTIPLICATION_FIXTURE */