/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/MaxUnpoolingLayer.h"
#include "tests/validation/reference/PoolingLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
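/** Generic validation fixture for max unpooling.
 *
 * Configures a pooling function that also returns the indices of the maxima, feeds its output
 * and those indices into a max unpooling function, and stores both the resulting target tensor
 * and a reference result computed with the reference pooling/max-unpooling implementations.
 */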
template <typename TensorType, typename AccessorType, typename PoolingFunctionType, typename MaxUnpoolingFunctionType, typename T>
class MaxUnpoolingLayerValidationGenericFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout)
    {
        std::mt19937                    gen(library->seed());
        std::uniform_int_distribution<> offset_dis(0, 20);

        // Fixed scale per data type; random zero-points in [-20, 0] for signed quantized types, [0, 20] otherwise.
        const float scale      = data_type == DataType::QASYMM8_SIGNED ? 1.f / 127.f : 1.f / 255.f;
        const int   offset_in  = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
        const int   offset_out = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);

        const QuantizationInfo input_qinfo(scale, offset_in);
        const QuantizationInfo output_qinfo(scale, offset_out);

        _pool_info = pool_info;
        _target    = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo);
        _reference = compute_reference(shape, pool_info, data_type, input_qinfo, output_qinfo);
    }

protected:
    template <typename U>
    void fill(U &&tensor)
    {
        if(tensor.data_type() == DataType::F32)
        {
            std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, 0);
        }
        else if(tensor.data_type() == DataType::F16)
        {
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
            library->fill(tensor, distribution, 0);
        }
        else // data type is quantized asymmetric
        {
            library->fill_tensor_uniform(tensor, 0);
        }
    }

    TensorType compute_target(TensorShape input_shape, PoolingLayerInfo pool_info,
                              DataType data_type, DataLayout data_layout,
                              QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
    {
        // Change shape in case of NHWC.
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType        src       = create_tensor<TensorType>(input_shape, data_type, 1, input_qinfo, data_layout);
        const TensorShape dst_shape = misc::shape_calculator::compute_pool_shape(*(src.info()), pool_info);
        TensorType        dst       = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout);
        TensorType        unpooled  = create_tensor<TensorType>(input_shape, data_type, 1, output_qinfo, data_layout);
        TensorType        indices   = create_tensor<TensorType>(dst_shape, DataType::U32, 1, output_qinfo, data_layout);

        // Create and configure pooling function (also produces the indices of the maxima)
        PoolingFunctionType pool_layer;
        pool_layer.configure(&src, &dst, pool_info, &indices);

        // Create and configure unpooling function
        MaxUnpoolingFunctionType unpool_layer;
        unpool_layer.configure(&dst, &indices, &unpooled, pool_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
        ARM_COMPUTE_ASSERT(indices.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();
        indices.allocator()->allocate();
        unpooled.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!indices.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!unpooled.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src));

        // Compute functions
        pool_layer.run();
        unpool_layer.run();

        return unpooled;
    }

    SimpleTensor<T> compute_reference(TensorShape input_shape, PoolingLayerInfo info, DataType data_type,
                                      QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
    {
        SimpleTensor<T>        src(input_shape, data_type, 1, input_qinfo);
        SimpleTensor<uint32_t> indices{};

        // Fill reference
        fill(src);

        // Reference pooling (with indices), followed by reference max unpooling
        auto pooled_tensor = reference::pooling_layer<T>(src, info, output_qinfo, &indices);
        return reference::max_unpooling_layer<T>(pooled_tensor, info, output_qinfo, indices, input_shape);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    PoolingLayerInfo _pool_info{};
};

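/** Convenience fixture that packs the pooling parameters into a PoolingLayerInfo and forwards
 * them, together with the data type and layout, to the generic fixture above.
 *
 * Illustrative usage sketch only -- the backend types, aliases and dataset wiring below are
 * assumptions about how a validation suite would typically instantiate this fixture:
 *
 *   template <typename T>
 *   using NEMaxUnpoolingLayerFixture = MaxUnpoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, NEMaxUnpoolingLayer, T>;
 *
 *   FIXTURE_DATA_TEST_CASE(MaxUnpooling, NEMaxUnpoolingLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
 *                          ...dataset supplying shape, pool type, pool size, pad/stride info, data type and data layout...)
 *   {
 *       validate(Accessor(_target), _reference);
 *   }
 */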
template <typename TensorType, typename AccessorType, typename F1, typename F2, typename T>
class MaxUnpoolingLayerValidationFixture : public MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>
{
public:
    template <typename...>
    void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, DataType data_type, DataLayout data_layout)
    {
        MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, true),
                                                                                              data_type, data_layout);
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE */