xref: /aosp_15_r20/external/ComputeLibrary/tests/validation/fixtures/WinogradConvolutionLayerFixture.h (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
25 #define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
26 
27 #include "arm_compute/core/TensorShape.h"
28 #include "arm_compute/core/Types.h"
29 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
30 #include "tests/AssetsLibrary.h"
31 #include "tests/Globals.h"
32 #include "tests/IAccessor.h"
33 #include "tests/framework/Asserts.h"
34 #include "tests/framework/Fixture.h"
35 #include "tests/validation/Helpers.h"
36 #include "tests/validation/reference/ActivationLayer.h"
37 #include "tests/validation/reference/ConvolutionLayer.h"
38 #include "tests/validation/reference/GEMM.h"
39 #include "tests/validation/reference/Permute.h"
40 #include "tests/validation/reference/Utils.h"
41 #include "tests/validation/reference/Winograd.h"
42 #include "utils/Utils.h"
43 
44 #include <random>
45 
46 namespace arm_compute
47 {
48 namespace test
49 {
50 namespace validation
51 {
52 using namespace arm_compute::misc::shape_calculator;
53 
/** Fixture validating a Winograd-based convolution function with fast math enabled.
 *
 * The target runs the fused function under test; the reference is rebuilt from the
 * individual Winograd stages: input transform -> filter transform -> batched GEMM
 * -> output transform, optionally followed by an activation layer.
 *
 * @tparam T            Data type the target function runs in.
 * @tparam T1           Data type the reference pipeline computes in (defaults to T;
 *                      differs when validating reduced-precision fast-math paths).
 * @tparam use_bias     If false, a null bias is passed to the target and the
 *                      reference bias is zero-filled.
 * @tparam mixed_layout If true, the data layout is switched between configure()
 *                      and run() to exercise multi data-layout graph cases.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)

    {
        ARM_COMPUTE_UNUSED(dilation);
        _mixed_layout = mixed_layout;
        _target       = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference    = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
    }

protected:
    // Runs the layer with src/dst layouts toggled after configure(), then restores
    // the original layouts so the suite validates values in the expected layout.
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

    // Fills @p tensor with uniformly distributed values in [min, max], using @p i
    // as the seed offset so each tensor in a test gets a distinct stream.
    // Only F16 and F32 are supported by these fixtures.
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    // Builds, configures and runs the function under test, returning its output tensor.
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        // Shapes are authored as NCHW; rotate them channels-first for NHWC runs.
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &weights, &bias, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors. Seeds (0/1/2) must match the ones used in compute_reference
        // so target and reference consume identical data.
        fill(AccessorType(src), 0, -0.5f, 0.5f);
        fill(AccessorType(weights), 1, -0.5f, 0.5f);
        fill(AccessorType(bias), 2, -0.5f, 0.5f);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute function
            conv.run();
        }
        return dst;
    }

    // Computes the reference by chaining the individual Winograd stages in T1 precision,
    // then converting back to T for comparison.
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src_t{ input_shape, data_type, 1 };
        SimpleTensor<T> weights_t{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias_t{ bias_shape, data_type, 1 };

        // Fill reference. Each tensor is filled in T, then copied into a T1 tensor so
        // the reference pipeline runs in the (possibly wider) reference precision.
        fill(src_t, 0, -0.5f, 0.5f);
        SimpleTensor<T1> src_t1(copy_tensor<T1, T>(src_t));

        fill(weights_t, 1, -0.5f, 0.5f);
        SimpleTensor<T1> weights_t1(copy_tensor<T1, T>(weights_t));
        if(use_bias)
        {
            fill(bias_t, 2, -0.5f, 0.5f);
        }
        else
        {
            // Zero bias mirrors the nullptr bias passed to the target function.
            fill(bias_t, 2, 0.f, 0.f);
        }
        SimpleTensor<T1> bias_t1(copy_tensor<T1, T>(bias_t));

        // Set output tile: 4x4 by default, narrowed for 1-D (1xN / Nx1) kernels.
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 7 && weights_shape[1] == 1)
        {
            output_tile.width  = 2;
            output_tile.height = 1;
        }
        else if(weights_shape[0] == 1 && weights_shape[1] == 7)
        {
            output_tile.width  = 1;
            output_tile.height = 2;
        }
        else if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src_t1.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication
        SimpleTensor<T1> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution
        SimpleTensor<T1> input_transform_out = reference::winograd_input_transform<T1>(src_t1, input_transform_shape, winograd_info);

        SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info);
        SimpleTensor<T1> batched_gemm         = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T1> conv_out             = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
        SimpleTensor<T>  conv_out_t(std::move(copy_tensor<T, T1>(conv_out)));
        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    bool            _mixed_layout{ false };
};
241 
242 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
243 class WinogradInputTransformValidationFixture : public framework::Fixture
244 {
245 public:
246     template <typename...>
setup(TensorShape input_shape,WinogradInfo winograd_info,DataLayout data_layout,DataType data_type)247     void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
248     {
249         TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
250         _mixed_layout            = mixed_layout;
251         _target                  = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
252         _reference               = compute_reference(input_shape, output_shape, winograd_info, data_type);
253     }
254 
255 protected:
mix_layout(FunctionType & layer,TensorType & src,TensorType & dst)256     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
257     {
258         const DataLayout data_layout_src = src.info()->data_layout();
259         const DataLayout data_layout_dst = dst.info()->data_layout();
260 
261         // Test Multi DataLayout graph cases, when the data layout changes after configure
262         src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
263         dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
264 
265         // Compute Convolution function
266         layer.run();
267 
268         // Reinstating original data layout for the test suite to properly check the values
269         src.info()->set_data_layout(data_layout_src);
270         dst.info()->set_data_layout(data_layout_dst);
271     }
272 
273     template <typename U>
fill(U && tensor,int i,float min,float max)274     void fill(U &&tensor, int i, float min, float max)
275     {
276         switch(tensor.data_type())
277         {
278             case DataType::F16:
279             {
280                 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
281                 library->fill(tensor, distribution, i);
282                 break;
283             }
284             case DataType::F32:
285             {
286                 std::uniform_real_distribution<float> distribution(min, max);
287                 library->fill(tensor, distribution, i);
288                 break;
289             }
290             default:
291             {
292                 ARM_COMPUTE_ERROR("Not supported");
293             }
294         }
295     }
296 
compute_target(TensorShape input_shape,const TensorShape & output_shape,const WinogradInfo & winograd_info,DataLayout data_layout,DataType data_type)297     TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
298     {
299         if(data_layout == DataLayout::NHWC)
300         {
301             permute(input_shape, PermutationVector(2U, 0U, 1U));
302         }
303 
304         TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
305         TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
306 
307         // Create and configure function
308         FunctionType transf;
309         transf.configure(&src, &dst, winograd_info);
310 
311         ARM_COMPUTE_ASSERT(src.info()->is_resizable());
312         ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
313 
314         add_padding_x({ &src, &dst }, data_layout);
315 
316         // Allocate tensors
317         src.allocator()->allocate();
318         dst.allocator()->allocate();
319 
320         ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
321         ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
322 
323         // Fill tensors
324         fill(AccessorType(src), 0, -1.f, 1.f);
325 
326         if(_mixed_layout)
327         {
328             mix_layout(transf, src, dst);
329         }
330         else
331         {
332             // Compute Winograd input transform function
333             transf.run();
334         }
335         return dst;
336     }
337 
compute_reference(const TensorShape & input_shape,const TensorShape & output_shape,const WinogradInfo & winograd_info,DataType data_type)338     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
339     {
340         // Create reference
341         SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
342 
343         // Fill reference
344         fill(src, 0, -1.f, 1.f);
345 
346         return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
347     }
348 
349     bool            _mixed_layout{ false };
350     TensorType      _target{};
351     SimpleTensor<T> _reference{};
352 };
353 
354 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
355 class WinogradFilterTransformValidationFixture : public framework::Fixture
356 {
357 public:
358     template <typename...>
setup(TensorShape input_shape,Size2D output_tile,DataLayout data_layout,DataType data_type)359     void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
360     {
361         WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
362         TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
363 
364         _mixed_layout = mixed_layout;
365         _target       = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
366         _reference    = compute_reference(input_shape, output_shape, winograd_info, data_type);
367     }
368 
369 protected:
mix_layout(FunctionType & layer,TensorType & src,TensorType & dst)370     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
371     {
372         const DataLayout data_layout_src = src.info()->data_layout();
373         const DataLayout data_layout_dst = dst.info()->data_layout();
374 
375         // Test Multi DataLayout graph cases, when the data layout changes after configure
376         src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
377         dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
378 
379         // Compute Convolution function
380         layer.run();
381 
382         // Reinstating original data layout for the test suite to properly check the values
383         src.info()->set_data_layout(data_layout_src);
384         dst.info()->set_data_layout(data_layout_dst);
385     }
386 
387     template <typename U>
fill(U && tensor,int i,float min,float max)388     void fill(U &&tensor, int i, float min, float max)
389     {
390         switch(tensor.data_type())
391         {
392             case DataType::F16:
393             {
394                 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
395                 library->fill(tensor, distribution, i);
396                 break;
397             }
398             case DataType::F32:
399             {
400                 std::uniform_real_distribution<float> distribution(min, max);
401                 library->fill(tensor, distribution, i);
402                 break;
403             }
404             default:
405             {
406                 ARM_COMPUTE_ERROR("Not supported");
407             }
408         }
409     }
410 
compute_target(TensorShape input_shape,const TensorShape & output_shape,const WinogradInfo & winograd_info,DataLayout data_layout,DataType data_type)411     TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
412     {
413         if(data_layout == DataLayout::NHWC)
414         {
415             permute(input_shape, PermutationVector(2U, 0U, 1U));
416         }
417 
418         // Create tensors
419         TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
420         TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
421 
422         // Create and configure function
423         FunctionType filter_transform;
424         filter_transform.configure(&src, &dst, winograd_info);
425 
426         ARM_COMPUTE_ASSERT(src.info()->is_resizable());
427         ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
428 
429         add_padding_x({ &src, &dst }, data_layout);
430 
431         // Allocate tensors
432         src.allocator()->allocate();
433         dst.allocator()->allocate();
434 
435         ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
436         ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
437 
438         // Fill tensors
439         fill(AccessorType(src), 0, -1.f, 1.f);
440 
441         if(_mixed_layout)
442         {
443             mix_layout(filter_transform, src, dst);
444         }
445         else
446         {
447             // Compute Winograd filter transform function
448             filter_transform.run();
449         }
450         return dst;
451     }
452 
compute_reference(const TensorShape & input_shape,const TensorShape & output_shape,const WinogradInfo & winograd_info,DataType data_type)453     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
454     {
455         // Create reference
456         SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
457 
458         // Fill reference
459         fill(src, 0, -1.f, 1.f);
460 
461         return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
462     }
463 
464     bool            _mixed_layout{ false };
465     TensorType      _target{};
466     SimpleTensor<T> _reference{};
467 };
468 
469 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
470 class WinogradOutputTransformValidationFixture : public framework::Fixture
471 {
472 public:
473     template <typename...>
474     void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
475     {
476         _target    = compute_target(input_shape, winograd_info, data_type, act_info);
477         _reference = compute_reference(input_shape, winograd_info, data_type, act_info);
478     }
479 
480 protected:
mix_layout(FunctionType & layer,TensorType & src,TensorType & dst)481     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
482     {
483         const DataLayout data_layout_src = src.info()->data_layout();
484         const DataLayout data_layout_dst = dst.info()->data_layout();
485 
486         // Test Multi DataLayout graph cases, when the data layout changes after configure
487         src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
488         dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
489 
490         // Compute Convolution function
491         layer.run();
492 
493         // Reinstating original data layout for the test suite to properly check the values
494         src.info()->set_data_layout(data_layout_src);
495         dst.info()->set_data_layout(data_layout_dst);
496     }
497 
498     template <typename U>
fill(U && tensor,int i,float min,float max)499     void fill(U &&tensor, int i, float min, float max)
500     {
501         switch(tensor.data_type())
502         {
503             case DataType::F16:
504             {
505                 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
506                 library->fill(tensor, distribution, i);
507                 break;
508             }
509             case DataType::F32:
510             {
511                 std::uniform_real_distribution<float> distribution(min, max);
512                 library->fill(tensor, distribution, i);
513                 break;
514             }
515             default:
516             {
517                 ARM_COMPUTE_ERROR("Not supported");
518             }
519         }
520     }
521 
compute_target(const TensorShape & input_shape,const WinogradInfo & winograd_info,DataType data_type,ActivationLayerInfo act_info)522     TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type, ActivationLayerInfo act_info)
523     {
524         TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
525 
526         // Create tensors
527         TensorType src  = create_tensor<TensorType>(input_shape, data_type);
528         TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
529         TensorType dst  = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
530 
531         // Create and configure function
532         FunctionType output_transform;
533         output_transform.configure(&src, &bias, &dst, winograd_info, act_info);
534 
535         ARM_COMPUTE_ASSERT(src.info()->is_resizable());
536         ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
537         ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
538 
539         add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout);
540 
541         // Allocate tensors
542         src.allocator()->allocate();
543         bias.allocator()->allocate();
544         dst.allocator()->allocate();
545 
546         ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
547         ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
548         ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
549 
550         // Fill tensors
551         fill(AccessorType(src), 0, -1.f, 1.f);
552         fill(AccessorType(bias), 1, -1.f, 1.f);
553 
554         if(_mixed_layout)
555         {
556             mix_layout(output_transform, src, dst);
557         }
558         else
559         {
560             // Compute Winograd output transform function
561             output_transform.run();
562         }
563         return dst;
564     }
565 
compute_reference(const TensorShape & input_shape,WinogradInfo winograd_info,DataType data_type,ActivationLayerInfo act_info)566     SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info)
567     {
568         winograd_info.output_data_layout = DataLayout::NCHW;
569         TensorShape output_shape         = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
570 
571         // Create reference
572         SimpleTensor<T> src{ input_shape, data_type };
573         SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };
574 
575         // Fill reference
576         fill(src, 0, -1.f, 1.f);
577         fill(bias, 1, -1.f, 1.f);
578 
579         const SimpleTensor<T> winograd_output = reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
580 
581         return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
582     }
583 
584     bool            _mixed_layout{ false };
585     TensorType      _target{};
586     SimpleTensor<T> _reference{};
587 };
588 } // namespace validation
589 } // namespace test
590 } // namespace arm_compute
591 #endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */