//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"

#include <armnn/BackendRegistry.hpp>

#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include "workloads/ClAbsWorkload.hpp"
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
#include "workloads/ClArgMinMaxWorkload.hpp"
#include "workloads/ClBatchMatMulWorkload.hpp"
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClCastWorkload.hpp"
#include "workloads/ClChannelShuffleWorkload.hpp"
#include "workloads/ClComparisonWorkload.hpp"
#include "workloads/ClConstantWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClConvolution3dWorkload.hpp"
#include "workloads/ClDepthToSpaceWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDivisionWorkload.hpp"
#include "workloads/ClExpWorkload.hpp"
#include "workloads/ClFillWorkload.hpp"
#include "workloads/ClFloorFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGatherWorkload.hpp"
#include "workloads/ClGatherNdWorkload.hpp"
#include "workloads/ClInstanceNormalizationWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLogWorkload.hpp"
#include "workloads/ClLogSoftmaxWorkload.hpp"
#include "workloads/ClLogicalAndWorkload.hpp"
#include "workloads/ClLogicalNotWorkload.hpp"
#include "workloads/ClLogicalOrWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
#include "workloads/ClMeanWorkload.hpp"
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
#include "workloads/ClNegWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClPooling3dWorkload.hpp"
#include "workloads/ClPreluWorkload.hpp"
#include "workloads/ClQLstmWorkload.hpp"
#include "workloads/ClQuantizedLstmWorkload.hpp"
#include "workloads/ClQuantizeWorkload.hpp"
#include "workloads/ClReduceWorkload.hpp"
#include "workloads/ClReshapeWorkload.hpp"
#include "workloads/ClResizeWorkload.hpp"
#include "workloads/ClRsqrtWorkload.hpp"
#include "workloads/ClSinWorkload.hpp"
#include "workloads/ClSliceWorkload.hpp"
#include "workloads/ClSoftmaxWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSpaceToDepthWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
#include "workloads/ClSqrtWorkload.hpp"
#include "workloads/ClStackWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#include "workloads/ClTransposeConvolution2dWorkload.hpp"
#include "workloads/ClTransposeWorkload.hpp"
#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
#endif


namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
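
// Illustrative usage (not taken from this file): the variadic overload unrolls the list of
// valid strides at compile time, e.g.
//     IsMatchingStride<1, 2, 3>(stride);   // true if stride is 1, 2 or 3
//     IsMatchingSize2d<3>(weightInfo);     // true for 3x3 filters (checks dims [2] and [3])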

template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
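
// Expansion sketch (assumes ARMCOMPUTECL_ENABLED is defined): a use such as
//     FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// becomes
//     return IsWorkloadSupported(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// i.e. the ACL validate function runs and any error description it returns is copied into
// reasonIfUnsupported. Without CL support the macro collapses to IsClBackendSupported(...),
// which always reports the backend as unavailable.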

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
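
// Note: the float function pointer is passed twice so that it covers both float data types
// handled by IsSupportedForDataTypeGeneric, while the two FalseFunc<> arguments mark the
// remaining data types as unsupported.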
} // anonymous namespace

ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}

bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchMatMul:
            return IsBatchMatMulSupported(infos[0],
                                          infos[1],
                                          infos[2],
                                          *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
                                          reasonIfUnsupported);
        case LayerType::BatchNormalization:
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ElementwiseBinary:
        {
            auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));

            switch (desc.m_Operation)
            {
                case BinaryOperation::Add:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Div:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Minimum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Maximum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Mul:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Sub:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                default:
                    return false;
            }
        }
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
                                       reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Multiplication:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Pooling3d:
            return IsPooling3dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                            UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            return true;
        default:
            // Layers not supported in the CL backend by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch
            return false;
    }
}
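
// Usage sketch (illustrative only; the shapes, data types and the ReLU activation below are
// assumptions, not taken from this file):
//
//     armnn::ClLayerSupport clLayerSupport;
//     armnn::TensorInfo inputInfo (armnn::TensorShape({1, 16}), armnn::DataType::Float32);
//     armnn::TensorInfo outputInfo(armnn::TensorShape({1, 16}), armnn::DataType::Float32);
//     armnn::ActivationDescriptor activationDesc;
//     activationDesc.m_Function = armnn::ActivationFunction::ReLu;
//     std::string reason;
//     bool supported = clLayerSupport.IsLayerSupported(armnn::LayerType::Activation,
//                                                      {inputInfo, outputInfo},
//                                                      activationDesc,
//                                                      armnn::EmptyOptional(),
//                                                      armnn::EmptyOptional(),
//                                                      armnn::Optional<std::string&>(reason));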

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
                                            const TensorInfo& inputY,
                                            const TensorInfo& output,
                                            const BatchMatMulDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchMatMulValidate,
                                   reasonIfUnsupported,
                                   inputX,
                                   inputY,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ChannelShuffleDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
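    // Worked example (for illustration): with a 4D tensor, concatenating over the outermost (batch)
    // axis gives concatAxis == 0 and concatInnerAxis == (4 - 0) - 1 == 3, which takes the sub-tensor
    // path below; concatenating over width, height or channels gives concatInnerAxis < 3.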
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif
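
    // Note: isFastMathEnabled is read from the backend model options above. As a hedged,
    // caller-side sketch (not part of this file), the option is typically supplied when
    // optimizing the network, e.g.
    //     armnn::BackendOptions clOptions("GpuAcc", {{"FastMathEnabled", true}});
    //     // ...and passed to the optimizer via its model options.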

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}

bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution3dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
999 
IsElementwiseUnarySupported(const TensorInfo & input,const TensorInfo & output,const ElementwiseUnaryDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const1000 bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
1001                                                  const TensorInfo& output,
1002                                                  const ElementwiseUnaryDescriptor& descriptor,
1003                                                  Optional<std::string&> reasonIfUnsupported) const
1004 {
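    // FORWARD_WORKLOAD_VALIDATE_FUNC expands to a return, so each case below
    // exits the function directly and no break statements are needed.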
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}

bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const FillDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
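    // Fill places no constraints on the tensors or descriptor here; support
    // depends only on the CL backend being available.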
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(descriptor);

    return IsClBackendSupported(reasonIfUnsupported);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}

bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}


bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
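    // LSTM on CL is validated against the float workload implementation.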
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling3dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo& input,
                                      const armnn::TensorInfo& alpha,
                                      const armnn::TensorInfo& output,
                                      armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
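    // The CL QLSTM workload only covers QAsymmS8 inputs/outputs combined with
    // a QSymmS16 cell state; any other data type combination is rejected.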
    if (input.GetDataType()               == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType()    == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType()      == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType()        == armnn::DataType::QSymmS16 &&
        output.GetDataType()              == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}

bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ReduceDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
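    // Fallback check: every output must share the input's type and
    // quantization space, otherwise the splitter cannot be handled here.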
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                           const TensorInfo& outputStateIn,
                                                           const TensorInfo& cellStateIn,
                                                           const TensorInfo& outputStateOut,
                                                           const TensorInfo& cellStateOut,
                                                           const TensorInfo& output,
                                                           const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                           const LstmInputParamsInfo& paramsInfo,
                                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

} // namespace armnn