xref: /aosp_15_r20/external/armnn/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp (revision 89c4ff92f2867872bb9e2354d150bf0c8c502810)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5 
#include "NormalizationTestImpl.hpp"

#include <armnn/BackendHelper.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnn/backends/ILayerSupport.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/TensorHelpers.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <array>
#include <cmath>
20 
21 namespace
22 {
23 
SimpleNormalizationTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,armnn::NormalizationAlgorithmChannel normChannel,armnn::NormalizationAlgorithmMethod normMethod)24 LayerTestResult<float,4> SimpleNormalizationTestImpl(
25     armnn::IWorkloadFactory& workloadFactory,
26     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
27     const armnn::ITensorHandleFactory& tensorHandleFactory,
28     armnn::NormalizationAlgorithmChannel normChannel,
29     armnn::NormalizationAlgorithmMethod normMethod)
30 {
31     IgnoreUnused(memoryManager);
32     const unsigned int inputHeight = 2;
33     const unsigned int inputWidth = 2;
34     const unsigned int inputChannels = 1;
35     const unsigned int inputNum = 2;
36 
37     unsigned int outputHeight = inputHeight;
38     unsigned int outputWidth = inputWidth;
39     unsigned int outputChannels = inputChannels;
40     unsigned int outputNum = inputNum;
41 
42     unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
43     unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
44 
45     auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
46     auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
47 
48     std::vector<float> input =
49     {
50         // Batch #0
51         1.0f, 2.0f,
52         3.0f, 4.0f,
53         // Batch #1
54         5.0f, 6.0f,
55         7.0f, 8.0f
56     };
57 
58     std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
59     std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
60 
61     float alpha = 1.f;
62     float beta = 1.f;
63     float kappa = 1.f;
64     uint32_t normSize = 3;
65 
66     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
67     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
68 
69     armnn::NormalizationQueueDescriptor data;
70     armnn::WorkloadInfo info;
71     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
72     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
73     data.m_Parameters.m_NormChannelType = normChannel;
74     data.m_Parameters.m_NormMethodType = normMethod;
75     data.m_Parameters.m_NormSize = normSize;
76     data.m_Parameters.m_Alpha = alpha;
77     data.m_Parameters.m_Beta = beta;
78     data.m_Parameters.m_K = kappa;
79     data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
80 
81     armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
82     armnn::NormalizationQueueDescriptor refData = data;
83     armnn::WorkloadInfo refInfo = info;
84     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
85 
86     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
87                                                                                 data,
88                                                                                 info);
89 
90     inputHandle->Allocate();
91     outputHandle->Allocate();
92 
93     CopyDataToITensorHandle(inputHandle.get(), input.data());
94 
95     ExecuteWorkload(*workload, memoryManager);
96 
97     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
98 
99     switch (normMethod)
100     {
101         case armnn::NormalizationAlgorithmMethod::LocalBrightness:
102         {
103             switch (normChannel)
104             {
105                 case armnn::NormalizationAlgorithmChannel::Within:
106                 {
107                     // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
108                     // Therefore, all output values should equal the inputs, but divided by:
109                     // pow((kappa + (accumulatedScale * alpha)), beta)
110                     // ...where accumulatedScale is the sum of every element squared.
111                     float divisor[inputNum];
112 
113                     float accumulatedScale1 = 0.0f;
114                     for (size_t i = 0; i < input.size()/2; ++i)
115                     {
116                         accumulatedScale1 += input[i]*input[i];
117                     }
118 
119                     float accumulatedScale2 = 0.0f;
120                     for (size_t i = input.size()/2; i < input.size(); ++i)
121                     {
122                         accumulatedScale2 += input[i]*input[i];
123                     }
124 
125                     divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
126                     divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
127 
128                     std::vector<float> output;
129                     unsigned int divisorIndex = 0;
130                     for (size_t i = 0; i < input.size(); ++i)
131                     {
132                         if (i == input.size()/2)
133                         {
134                             divisorIndex++;
135                         }
136                         output.emplace_back(input[i]/divisor[divisorIndex]);
137                     }
138 
139                     expectedOutput = output;
140                     break;
141                 }
142                 case armnn::NormalizationAlgorithmChannel::Across:
143                 {
144                     // When normalising across channels, all output values should equal the inputs, but multiplied by:
145                     // pow((kappa + (accumulatedScale * alpha)), -beta)
146                     // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
147                     // ...where adjacent channels means within half the normSize for the channel
148                     // The test data has only one channel, so this is simplified below.
149                     std::vector<float> outputVector;
150 
151                     for (unsigned int i = 0; i < input.size(); ++i)
152                     {
153                         float accumulatedScale = input[i]*input[i];
154                         float scale = powf((kappa + accumulatedScale * alpha), -beta);
155                         outputVector.push_back(input[i] * scale);
156                     }
157                     expectedOutput = outputVector;
158                     break;
159                 }
160                 default:
161                 {
162                     throw armnn::UnimplementedException("Unsupported normalisation channel type, "
163                                                         "only Across and Within are supported");
164                 }
165             }
166             break;
167         }
168         case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
169         default:
170         {
171             throw armnn::UnimplementedException("Unsupported normalisation method type, "
172                                                 "only LocalBrightness is supported");
173         }
174     }
175 
176     return LayerTestResult<float, 4>(actualOutput,
177                                      expectedOutput,
178                                      outputHandle->GetShape(),
179                                      outputTensorInfo.GetShape());
180 }
181 
SimpleNormalizationNhwcTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,armnn::NormalizationAlgorithmChannel normChannel,armnn::NormalizationAlgorithmMethod normMethod)182 LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
183     armnn::IWorkloadFactory& workloadFactory,
184     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
185     const armnn::ITensorHandleFactory& tensorHandleFactory,
186     armnn::NormalizationAlgorithmChannel normChannel,
187     armnn::NormalizationAlgorithmMethod normMethod)
188 {
189     const unsigned int inputHeight = 2;
190     const unsigned int inputWidth = 2;
191     const unsigned int inputChannels = 1;
192     const unsigned int inputNum = 2;
193 
194     unsigned int outputHeight = inputHeight;
195     unsigned int outputWidth = inputWidth;
196     unsigned int outputChannels = inputChannels;
197     unsigned int outputNum = inputNum;
198 
199     unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
200     unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
201 
202     auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
203     auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
204 
205     std::vector<float> input =
206     {
207         // Batch #0
208         1.0f, 2.0f,
209         3.0f, 4.0f,
210         // Batch #1
211         5.0f, 6.0f,
212         7.0f, 8.0f
213     };
214 
215     std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
216     std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
217 
218     float alpha = 1.f;
219     float beta = 1.f;
220     float kappa = 1.f;
221     uint32_t normSize = 3;
222 
223     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
224     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
225 
226     armnn::NormalizationQueueDescriptor data;
227     armnn::WorkloadInfo info;
228     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
229     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
230     data.m_Parameters.m_NormChannelType = normChannel;
231     data.m_Parameters.m_NormMethodType = normMethod;
232     data.m_Parameters.m_NormSize = normSize;
233     data.m_Parameters.m_Alpha = alpha;
234     data.m_Parameters.m_Beta = beta;
235     data.m_Parameters.m_K = kappa;
236     data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
237 
238     armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
239     armnn::NormalizationQueueDescriptor refData = data;
240     armnn::WorkloadInfo refInfo = info;
241     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
242 
243     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
244                                                                                 data,
245                                                                                 info);
246 
247     inputHandle->Allocate();
248     outputHandle->Allocate();
249 
250     CopyDataToITensorHandle(inputHandle.get(), input.data());
251 
252     ExecuteWorkload(*workload, memoryManager);
253 
254     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
255 
256     switch (normMethod)
257     {
258         case armnn::NormalizationAlgorithmMethod::LocalBrightness:
259         {
260             switch (normChannel)
261             {
262                 case armnn::NormalizationAlgorithmChannel::Across:
263                 {
264                     expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
265                                        0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
266                     break;
267                 }
268                 default:
269                 {
270                     throw armnn::UnimplementedException("Unsupported normalisation channel type, "
271                                                         "Only Cross-map is supported for NHWC layout");
272                 }
273             }
274             break;
275         }
276         case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
277         default:
278         {
279             throw armnn::UnimplementedException("Unsupported normalisation method type, "
280                                                 "only LocalBrightness is supported");
281         }
282     }
283 
284     return LayerTestResult<float, 4>(actualOutput,
285                                      expectedOutput,
286                                      outputHandle->GetShape(),
287                                      outputTensorInfo.GetShape());
288 }
289 
// Runs the same Normalization workload twice - once on the backend under test
// (workloadFactory) and once on a reference backend (refWorkloadFactory) - and
// returns both outputs so the caller can compare them. If the backend under
// test does not support the requested method/channel combination the result is
// returned with m_Supported == false and nothing is executed.
LayerTestResult<float,4> CompareNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    // 5 batches of 3x32x24 (NCHW) random data.
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    // Normalization is shape-preserving.
    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Fixed seed so both runs (and reruns) see identical input data.
    auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    // Normalization parameters applied identically to both workloads.
    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType  = normMethod;
    data.m_Parameters.m_NormSize        = normSize;
    data.m_Parameters.m_Alpha           = alpha;
    data.m_Parameters.m_Beta            = beta;
    data.m_Parameters.m_K               = kappa;
    // NOTE(review): m_DataLayout is left at its default here, unlike the other
    // test impls in this file which set it explicitly - confirm this is intended.

    // The reference run shares the same descriptor/info, but swaps in its own
    // tensor handles created from the reference handle factory.
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    auto handle = armnn::GetILayerSupportByBackendId(backend);
    ret.m_Supported = handle.IsNormalizationSupported(inputTensorInfo, outputTensorInfo, data.m_Parameters);

    if (!ret.m_Supported)
    {
        // Caller inspects m_Supported; skip workload creation and execution.
        return ret;
    }

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::Normalization, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
            = refWorkloadFactory.CreateWorkload(armnn::LayerType::Normalization, refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    // Both runs consume the identical input buffer.
    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    // Backend-under-test result is "actual"; reference backend result is "expected".
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
    ret.m_ActualData = actualOutput;
    ret.m_ExpectedData = expectedOutput;

    return ret;
}
387 
AcrossChannelNormalizationTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,armnn::NormalizationAlgorithmChannel normChannel,armnn::NormalizationAlgorithmMethod normMethod)388 LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
389     armnn::IWorkloadFactory& workloadFactory,
390     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
391     const armnn::ITensorHandleFactory& tensorHandleFactory,
392     armnn::NormalizationAlgorithmChannel normChannel,
393     armnn::NormalizationAlgorithmMethod normMethod)
394 {
395     const unsigned int inputHeight = 1;
396     const unsigned int inputWidth = 2;
397     const unsigned int inputChannels = 3;
398     const unsigned int inputNum = 2;
399 
400     unsigned int outputHeight = inputHeight;
401     unsigned int outputWidth = inputWidth;
402     unsigned int outputChannels = inputChannels;
403     unsigned int outputNum = inputNum;
404 
405     unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
406     unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
407 
408     auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
409     auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
410 
411     std::vector<float> input =
412     {
413         // Batch #0
414         -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
415         // Batch #1
416         -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
417     };
418 
419     std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
420     std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
421 
422     float alpha = 4.f;
423     float beta  = 0.5f;
424     float kappa = 9.f;
425     uint32_t normSize = 5;
426 
427     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
428     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
429 
430     armnn::NormalizationQueueDescriptor data;
431     armnn::WorkloadInfo info;
432     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
433     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
434     data.m_Parameters.m_NormChannelType = normChannel;
435     data.m_Parameters.m_NormMethodType = normMethod;
436     data.m_Parameters.m_NormSize = normSize;
437     data.m_Parameters.m_Alpha = alpha;
438     data.m_Parameters.m_Beta = beta;
439     data.m_Parameters.m_K = kappa;
440     data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
441 
442     armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
443     armnn::NormalizationQueueDescriptor refData = data;
444     armnn::WorkloadInfo refInfo = info;
445     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
446 
447     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
448                                                                                 data,
449                                                                                 info);
450 
451     inputHandle->Allocate();
452     outputHandle->Allocate();
453 
454     CopyDataToITensorHandle(inputHandle.get(), input.data());
455 
456     ExecuteWorkload(*workload, memoryManager);
457 
458     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
459 
460     switch (normMethod)
461     {
462         case armnn::NormalizationAlgorithmMethod::LocalBrightness:
463         {
464             switch (normChannel)
465             {
466                 case armnn::NormalizationAlgorithmChannel::Across:
467                 {
468                     expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
469                                        -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
470                     break;
471                 }
472                 default:
473                 {
474                     throw armnn::UnimplementedException("Unsupported normalisation channel type, "
475                                                         "only Across and Within are supported");
476                 }
477             }
478             break;
479         }
480         case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
481         default:
482         {
483             throw armnn::UnimplementedException("Unsupported normalisation method type, "
484                                                 "only LocalBrightness is supported");
485         }
486     }
487 
488     return LayerTestResult<float, 4>(actualOutput,
489                                      expectedOutput,
490                                      outputHandle->GetShape(),
491                                      outputTensorInfo.GetShape());
492 }
493 
494 } // anonymous namespace
495 
SimpleNormalizationAcrossTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)496 LayerTestResult<float,4> SimpleNormalizationAcrossTest(
497     armnn::IWorkloadFactory& workloadFactory,
498     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
499     const armnn::ITensorHandleFactory& tensorHandleFactory)
500 {
501     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
502     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
503     return SimpleNormalizationTestImpl(workloadFactory, memoryManager,  tensorHandleFactory, normChannel, normMethod);
504 }
505 
SimpleNormalizationWithinTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)506 LayerTestResult<float,4> SimpleNormalizationWithinTest(
507     armnn::IWorkloadFactory& workloadFactory,
508     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
509     const armnn::ITensorHandleFactory& tensorHandleFactory)
510 {
511     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
512     auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
513     return SimpleNormalizationTestImpl(workloadFactory, memoryManager,  tensorHandleFactory, normChannel, normMethod);
514 }
515 
SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)516 LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
517     armnn::IWorkloadFactory& workloadFactory,
518     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
519     const armnn::ITensorHandleFactory& tensorHandleFactory)
520 {
521     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
522     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
523     return SimpleNormalizationNhwcTestImpl(
524             workloadFactory, memoryManager,  tensorHandleFactory, normChannel, normMethod);
525 }
526 
CompareNormalizationTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,armnn::IWorkloadFactory & refWorkloadFactory,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::ITensorHandleFactory & refTensorHandleFactory,armnn::NormalizationAlgorithmChannel normChannel,armnn::NormalizationAlgorithmMethod normMethod)527 LayerTestResult<float,4> CompareNormalizationTest(
528     armnn::IWorkloadFactory& workloadFactory,
529     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
530     armnn::IWorkloadFactory& refWorkloadFactory,
531     const armnn::ITensorHandleFactory& tensorHandleFactory,
532     const armnn::ITensorHandleFactory& refTensorHandleFactory,
533     armnn::NormalizationAlgorithmChannel normChannel,
534     armnn::NormalizationAlgorithmMethod normMethod)
535 {
536     return CompareNormalizationTestImpl(
537             workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
538             normChannel, normMethod);
539 }
540 
AcrossChannelNormalizationTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)541 LayerTestResult<float,4> AcrossChannelNormalizationTest(
542     armnn::IWorkloadFactory& workloadFactory,
543     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
544     const armnn::ITensorHandleFactory& tensorHandleFactory)
545 {
546     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
547     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
548     return AcrossChannelNormalizationTestImpl(workloadFactory,
549                                                memoryManager,
550                                                tensorHandleFactory,
551                                                normChannel,
552                                                normMethod);
553 }
554