xref: /aosp_15_r20/external/ComputeLibrary/tests/validation/fixtures/FullyConnectedLayerFixture.h (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1 /*
2  * Copyright (c) 2017-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
25 #define ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
26 
27 #include "arm_compute/core/TensorShape.h"
28 #include "arm_compute/core/Types.h"
29 #include "arm_compute/core/Utils.h"
30 #include "tests/AssetsLibrary.h"
31 #include "tests/Globals.h"
32 #include "tests/IAccessor.h"
33 #include "tests/RawTensor.h"
34 #include "tests/framework/Asserts.h"
35 #include "tests/framework/Fixture.h"
36 #include "tests/validation/Helpers.h"
37 #include "tests/validation/reference/ActivationLayer.h"
38 #include "tests/validation/reference/FullyConnectedLayer.h"
39 #include "tests/validation/reference/Utils.h"
40 
41 #include <random>
42 
43 namespace arm_compute
44 {
45 namespace test
46 {
47 namespace validation
48 {
/** Generic validation fixture for fully connected layer functions.
 *
 * Configures and runs the target function (@p FunctionType) and a reference
 * implementation for a given data type / quantization / activation setup,
 * optionally exercising pre-transposed weights and a mixed data-layout graph.
 * Results are stored in @c _target and @c _reference for the test suite to compare.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FullyConnectedLayerValidationGenericFixture : public framework::Fixture
{
public:
    using TDecay = typename std::decay<T>::type;
    // Asymmetric-quantized types (uint8_t/int8_t) accumulate into 32-bit biases; all other types keep T.
    using TBias  = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type;

public:
    /** Set up the test: cache the configuration and compute both target and reference outputs.
     *
     * @param input_shape       Shape of the input tensor.
     * @param weights_shape     Shape of the weights tensor (non-reshaped layout).
     * @param bias_shape        Shape of the bias tensor.
     * @param output_shape      Shape of the output tensor.
     * @param transpose_weights True if the function is asked to transpose the weights.
     * @param reshape_weights   True if the function is asked to reshape the weights.
     * @param data_type         Data type of input/weights/output tensors.
     * @param quantization_info Quantization info applied to all tensors.
     * @param activation_info   Activation applied after the fully connected layer.
     * @param mixed_layout      If true, flip the data layout after configure and run (see mix_layout()).
     */
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false)
    {
        // NOTE(review): both shapes ARE used below (passed to compute_target/compute_reference);
        // these macros are harmless (void)-casts kept from an earlier revision.
        ARM_COMPUTE_UNUSED(weights_shape);
        ARM_COMPUTE_UNUSED(bias_shape);

        _mixed_layout      = mixed_layout;
        _data_type         = data_type;
        _bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
        _quantization_info = quantization_info;
        _activation_info   = activation_info;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape);
    }

protected:
    /** Run @p layer with the data layout of @p src / @p dst flipped (NCHW <-> NHWC),
     * then restore the original layout so the suite can validate the values.
     * Exercises multi-data-layout graph cases where the layout changes after configure().
     */
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Run the fully connected function with the flipped layout
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

    /** Fill @p tensor with a distribution appropriate for the configured data type.
     *
     * @param tensor Tensor (or accessor) to fill.
     * @param i      Seed offset so each tensor (src=0, weights=1, bias=2) gets a
     *               distinct but reproducible pattern.
     */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        if(_data_type == DataType::QASYMM8)
        {
            // Narrow range keeps the quantized accumulation within representable bounds
            std::uniform_int_distribution<uint32_t> distribution(0, 30);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::QASYMM8_SIGNED)
        {
            std::uniform_int_distribution<int32_t> distribution(-15, 15);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::S32)
        {
            std::uniform_int_distribution<int32_t> distribution(-50, 50);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::F16)
        {
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::F32)
        {
            std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, i);
        }
        else
        {
            // Fallback: library-provided uniform fill over the type's full range
            library->fill_tensor_uniform(tensor, i);
        }
    }

    /** Create, configure, fill and run the target function; returns the output tensor.
     *
     * When the weights are not reshaped by the function (or not transposed), the weights
     * tensor is created with swapped X/Y dimensions and filled element-wise from a
     * transposed copy of the original-shape data, so target and reference see the
     * same logical weights.
     */
    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights,
                              bool reshape_weights)
    {
        TensorShape reshaped_weights_shape(weights_shape);

        // Test actions depending on the target settings
        //
        //            | reshape   | !reshape
        // -----------+-----------+---------------------------
        //  transpose |           | ***
        // -----------+-----------+---------------------------
        // !transpose | transpose | transpose
        //            |           |
        //
        // ***: That combination is invalid. But we can ignore the transpose flag and handle all !reshape the same
        if(!reshape_weights || !transpose_weights)
        {
            // Swap X and Y: weights are handed to the function already transposed
            const size_t shape_x = reshaped_weights_shape.x();
            reshaped_weights_shape.set(0, reshaped_weights_shape.y());
            reshaped_weights_shape.set(1, shape_x);
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);

        // Create Fully Connected layer info
        FullyConnectedLayerInfo fc_info;
        fc_info.transpose_weights    = transpose_weights;
        fc_info.are_weights_reshaped = !reshape_weights;
        fc_info.activation_info      = _activation_info;

        // Create and configure function.
        FunctionType fc;
        fc.configure(&src, &weights, &bias, &dst, fc_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Exercise non-zero padding on the innermost dimension
        add_padding_x({ &src, &weights, &bias, &dst });

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors (seeds 0/1/2 match compute_reference() so both sides see identical data)
        fill(AccessorType(src), 0);
        fill(AccessorType(bias), 2);

        if(!reshape_weights || !transpose_weights)
        {
            TensorShape tmp_shape(weights_shape);
            RawTensor   tmp(tmp_shape, _data_type, 1);

            // Fill with original shape
            fill(tmp, 1);

            // Transpose elementwise
            tmp = transpose(tmp);

            AccessorType weights_accessor(weights);

            // Copy element-by-element: element_size is in bytes, matching RawTensor's byte storage
            for(int i = 0; i < tmp.num_elements(); ++i)
            {
                Coordinates coord = index2coord(tmp.shape(), i);
                std::copy_n(static_cast<const RawTensor::value_type *>(tmp(coord)),
                            tmp.element_size(),
                            static_cast<RawTensor::value_type *>(weights_accessor(coord)));
            }
        }
        else
        {
            fill(AccessorType(weights), 1);
        }

        if(_mixed_layout)
        {
            mix_layout(fc, src, dst);
        }
        else
        {
            // Compute the fully connected function
            fc.run();
        }

        return dst;
    }

    /** Compute the reference output: fully connected layer followed by the activation. */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };

        // Fill reference (same seeds as compute_target())
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape, _quantization_info), _activation_info, _quantization_info);
    }

    TensorType          _target{};            // Output produced by the function under test
    SimpleTensor<T>     _reference{};         // Output produced by the reference implementation
    DataType            _data_type{};
    DataType            _bias_data_type{};    // S32 for asymmetric-quantized types, else _data_type
    bool                _mixed_layout{ false };
    QuantizationInfo    _quantization_info{};
    ActivationLayerInfo _activation_info{};
};
246 
247 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
248 class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
249 {
250 public:
251     template <typename...>
setup(TensorShape input_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape output_shape,bool transpose_weights,bool reshape_weights,DataType data_type,ActivationLayerInfo activation_info)252     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type,
253                ActivationLayerInfo activation_info)
254     {
255         FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
256                                                                                                       reshape_weights, data_type,
257                                                                                                       QuantizationInfo(), activation_info, mixed_layout);
258     }
259 };
260 
261 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
262 class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
263 {
264 public:
265     template <typename...>
setup(TensorShape input_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape output_shape,bool transpose_weights,bool reshape_weights,DataType data_type,QuantizationInfo quantization_info,ActivationLayerInfo activation_info)266     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type,
267                QuantizationInfo quantization_info, ActivationLayerInfo activation_info)
268     {
269         FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
270                                                                                                       reshape_weights, data_type,
271                                                                                                       quantization_info, activation_info, mixed_layout);
272     }
273 };
274 
/** Fixture that exercises a fully connected layer whose weights and/or bias may
 * change between runs (marked non-constant via TensorInfo::set_are_values_constant).
 *
 * setup() configures the function once, then runs it for several iterations with
 * fresh inputs (and fresh weights/bias when non-constant), validating the target
 * output against the reference after every iteration.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FullyConnectedWithDynamicTensorsFixture : public framework::Fixture
{
private:
    /** Fill @p tensor with a distribution appropriate for the configured data type;
     * @p i seeds the RNG for reproducibility.
     */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        if(_data_type == DataType::F16)
        {
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::F32)
        {
            std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::QASYMM8)
        {
            std::uniform_int_distribution<uint32_t> distribution(0, 30);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::S32)
        {
            std::uniform_int_distribution<int32_t> distribution(-50, 50);
            library->fill(tensor, distribution, i);
        }
        else
        {
            library->fill_tensor_uniform(tensor, i);
        }
    }

    /** Fill @p weights (whose tensor has the transposed shape) with data generated
     * in the original @p weights_shape, transposed element-wise. This keeps target
     * weights consistent with the reference, which uses the non-transposed shape
     * with the same @p seed.
     */
    void fill_transposed_weights(TensorType &weights, TensorShape weights_shape, int seed)
    {
        RawTensor tmp(weights_shape, _data_type, 1);

        // Fill with original shape
        fill(tmp, seed);

        // Transpose elementwise
        tmp = transpose(tmp);

        AccessorType weights_accessor(weights);

        // Copy element-by-element: element_size is in bytes, matching RawTensor's byte storage
        for(int i = 0; i < tmp.num_elements(); ++i)
        {
            Coordinates coord = index2coord(tmp.shape(), i);
            std::copy_n(static_cast<const RawTensor::value_type *>(tmp(coord)),
                        tmp.element_size(),
                        static_cast<RawTensor::value_type *>(weights_accessor(coord)));
        }
    }

    /** Validate @p target against @p ref with a data-type-specific tolerance. */
    void validate_with_tolerance(TensorType &target, SimpleTensor<T> &ref)
    {
        if(_data_type == DataType::F32)
        {
            constexpr RelativeTolerance<float> rel_tolerance_f32(0.05f);
            constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f);
            validate(AccessorType(target), ref, rel_tolerance_f32, 0, abs_tolerance_f32);
        }
        else if(_data_type == DataType::QASYMM8)
        {
            // Allow off-by-one in the quantized domain
            constexpr AbsoluteTolerance<uint32_t> tolerance_qasymm8(1);
            validate(AccessorType(target), ref, tolerance_qasymm8);
        }
        else
        {
            // Exact match for all remaining data types
            validate(AccessorType(target), ref);
        }
    }

public:
    using TDecay = typename std::decay<T>::type;
    // Asymmetric-quantized types (uint8_t/int8_t) accumulate into 32-bit biases; all other types keep T.
    using TBias  = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type;

    /** Configure the layer once, then run and validate multiple iterations.
     *
     * @param src_shape        Shape of the input tensor.
     * @param weights_shape    Shape of the weights tensor (non-transposed layout).
     * @param bias_shape       Shape of the bias tensor.
     * @param dst_shape        Shape of the output tensor.
     * @param data_type        Data type of input/weights/output tensors.
     * @param activation_info  Activation applied after the fully connected layer.
     * @param constant_weights If false, weights are marked non-constant, given to the
     *                         function pre-transposed/reshaped, and refilled every iteration.
     * @param constant_bias    If false, bias is marked non-constant and refilled every iteration.
     */
    template <typename...>
    void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
               DataType data_type, ActivationLayerInfo activation_info, bool constant_weights, bool constant_bias)
    {
        _data_type = data_type;

        const bool is_quantized = is_data_type_quantized(data_type);

        const DataType bias_data_type = (is_quantized) ? DataType::S32 : data_type;

        const QuantizationInfo src_qinfo     = is_quantized ? QuantizationInfo(0.1f, 10) : QuantizationInfo();
        const QuantizationInfo weights_qinfo = is_quantized ? QuantizationInfo(0.3f, 20) : QuantizationInfo();
        const QuantizationInfo dst_qinfo     = is_quantized ? QuantizationInfo(0.2f, 5) : QuantizationInfo();

        // Setup tensor meta-data
        const TensorInfo src_info(src_shape, 1, data_type, src_qinfo);
        _src.allocator()->init(src_info);

        TensorInfo wei_info(weights_shape, 1, data_type, weights_qinfo);
        if(!constant_weights)
        {
            // Dynamic weights are handed to the function already transposed (X/Y swapped)
            const TensorShape tr_weights_shape{ weights_shape[1], weights_shape[0] };
            wei_info.set_tensor_shape(tr_weights_shape);
        }
        wei_info.set_are_values_constant(constant_weights);
        _weights.allocator()->init(wei_info);

        TensorInfo bias_info(bias_shape, 1, bias_data_type);
        bias_info.set_are_values_constant(constant_bias);
        _bias.allocator()->init(bias_info);

        const TensorInfo dst_info(dst_shape, 1, data_type, dst_qinfo);
        _dst.allocator()->init(dst_info);

        // Configure FC layer and mark the weights as non constant
        FullyConnectedLayerInfo fc_info;
        fc_info.activation_info = activation_info;
        if(!constant_weights)
        {
            // Weights are supplied already reshaped/transposed, so disable both in the function
            fc_info.are_weights_reshaped = true;
            fc_info.transpose_weights    = false;
        }
        FunctionType fc;
        fc.configure(&_src, &_weights, &_bias, &_dst, fc_info);

        // Allocate all the tensors
        _src.allocator()->allocate();
        _weights.allocator()->allocate();
        _bias.allocator()->allocate();
        _dst.allocator()->allocate();

        // Run multiple iterations with different inputs
        constexpr int num_iterations    = 5;
        int           randomizer_offset = 0;

        // Create reference tensors
        SimpleTensor<T>     src{ src_shape, data_type, 1, src_qinfo };
        SimpleTensor<T>     weights{ weights_shape, data_type, 1, weights_qinfo };
        SimpleTensor<TBias> bias{ bias_shape, bias_data_type };

        // Fill weights and/or bias if they remain constant (seed 1 / seed 2 on both sides)
        if(constant_weights)
        {
            fill(AccessorType(_weights), 1);
            fill(weights, 1);
        }
        if(constant_bias)
        {
            fill(AccessorType(_bias), 2);
            fill(bias, 2);
        }

        for(int i = 0; i < num_iterations; ++i)
        {
            // Run target
            {
                fill(AccessorType(_src), randomizer_offset);
                if(!constant_weights)
                {
                    // Target weights tensor is transposed; fill via the transposing helper
                    fill_transposed_weights(_weights, weights_shape, randomizer_offset + 1);
                }
                if(!constant_bias)
                {
                    fill(AccessorType(_bias), randomizer_offset + 2);
                }

                fc.run();
            }

            // Run reference and compare
            {
                // Fill reference (same seeds as the target fills above)
                fill(src, randomizer_offset);
                if(!constant_weights)
                {
                    fill(weights, randomizer_offset + 1);
                }
                if(!constant_bias)
                {
                    fill(bias, randomizer_offset + 2);
                }

                auto dst = reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, dst_shape, dst_qinfo), activation_info, dst_qinfo);

                // Validate
                validate_with_tolerance(_dst, dst);
            }

            // Advance seeds so each iteration uses fresh, non-overlapping data
            randomizer_offset += 100;
        }
    }

private:
    TensorType _src{}, _weights{}, _bias{}, _dst{};
    DataType   _data_type{ DataType::UNKNOWN };
};
468 
469 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
470 class FullyConnectedWithDynamicWeightsFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
471 {
472 public:
473     template <typename...>
setup(TensorShape src_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape dst_shape,DataType data_type,ActivationLayerInfo activation_info)474     void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
475                DataType data_type, ActivationLayerInfo activation_info)
476     {
477         FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
478                                                                                                   dst_shape, data_type, activation_info, false, true);
479     }
480 };
481 
482 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
483 class FullyConnectedWithDynamicBiasFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
484 {
485 public:
486     template <typename...>
setup(TensorShape src_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape dst_shape,DataType data_type,ActivationLayerInfo activation_info)487     void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
488                DataType data_type, ActivationLayerInfo activation_info)
489     {
490         FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
491                                                                                                   dst_shape, data_type, activation_info, true, false);
492     }
493 };
494 } // namespace validation
495 } // namespace test
496 } // namespace arm_compute
497 #endif /* ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE */
498