1 //
2 // Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include <armnn/BackendHelper.hpp>
7 #include <armnn/BackendId.hpp>
8 #include <armnn/BackendOptions.hpp>
9 #include <armnn/BackendRegistry.hpp>
10 #include <armnn/LstmParams.hpp>
11 #include <armnn/QuantizedLstmParams.hpp>
12 #include <armnn/Tensor.hpp>
13 #include <armnn/Types.hpp>
14 #include <armnn/backends/ILayerSupport.hpp>
15 #include <armnn/backends/IBackendInternal.hpp>
16
17 #include <stddef.h>
18
19 namespace armnn
20 {
21
22 // Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
GetILayerSupportByBackendId(const armnn::BackendId & backend)23 LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
24 {
25 BackendRegistry& backendRegistry = armnn::BackendRegistryInstance();
26
27 if (!backendRegistry.IsBackendRegistered(backend))
28 {
29 return LayerSupportHandle(nullptr);
30 }
31
32 auto factoryFunc = backendRegistry.GetFactory(backend);
33 auto backendObject = factoryFunc();
34 return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
35 }
36
GetCapability(const std::string & backendCapabilityName,const BackendCapabilities & capabilities)37 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
38 const BackendCapabilities& capabilities)
39 {
40 for (size_t i=0; i < capabilities.GetOptionCount(); i++)
41 {
42 const auto& capability = capabilities.GetOption(i);
43 if (backendCapabilityName == capability.GetName())
44 {
45 return capability;
46 }
47 }
48 return EmptyOptional();
49 }
50
GetCapability(const std::string & backendCapabilityName,const armnn::BackendId & backend)51 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
52 const armnn::BackendId& backend)
53 {
54 auto const& backendRegistry = armnn::BackendRegistryInstance();
55 if (backendRegistry.IsBackendRegistered(backend))
56 {
57 auto factoryFunc = backendRegistry.GetFactory(backend);
58 auto backendObject = factoryFunc();
59 auto capabilities = backendObject->GetCapabilities();
60 return GetCapability(backendCapabilityName, capabilities);
61 }
62 return EmptyOptional();
63 }
64
HasCapability(const std::string & name,const BackendCapabilities & capabilities)65 bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
66 {
67 return GetCapability(name, capabilities).has_value();
68 }
69
HasCapability(const std::string & name,const armnn::BackendId & backend)70 bool HasCapability(const std::string& name, const armnn::BackendId& backend)
71 {
72 return GetCapability(name, backend).has_value();
73 }
74
HasCapability(const BackendOptions::BackendOption & capability,const BackendCapabilities & capabilities)75 bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
76 {
77 for (size_t i=0; i < capabilities.GetOptionCount(); i++)
78 {
79 const auto& backendCapability = capabilities.GetOption(i);
80 if (capability.GetName() == backendCapability.GetName())
81 {
82 if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
83 {
84 return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
85 }
86 else if (capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
87 {
88 return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
89 }
90 else if (capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
91 {
92 return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
93 }
94 else if (capability.GetValue().IsString() && backendCapability.GetValue().IsString())
95 {
96 return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
97 }
98 else if (capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
99 {
100 return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
101 }
102 }
103 }
104 return false;
105 }
106
HasCapability(const BackendOptions::BackendOption & backendOption,const armnn::BackendId & backend)107 bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
108 {
109 auto const& backendRegistry = armnn::BackendRegistryInstance();
110 if (backendRegistry.IsBackendRegistered(backend))
111 {
112 auto factoryFunc = backendRegistry.GetFactory(backend);
113 auto backendObject = factoryFunc();
114 auto capabilities = backendObject->GetCapabilities();
115 return HasCapability(backendOption, capabilities);
116 }
117 return false;
118 }
119
GetNumberOfCacheFiles(const armnn::BackendId & backend)120 unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
121 {
122 auto const& backendRegistry = armnn::BackendRegistryInstance();
123 if (backendRegistry.IsBackendRegistered(backend))
124 {
125 auto factoryFunc = backendRegistry.GetFactory(backend);
126 auto backendObject = factoryFunc();
127 return backendObject->GetNumberOfCacheFiles();
128 }
129 return 0;
130 }
131
IsBackendRegistered() const132 bool LayerSupportHandle::IsBackendRegistered() const
133 {
134 if (m_LayerSupport)
135 {
136 return true;
137 }
138
139 return false;
140 }
141
142 using TensorInfos = std::vector<TensorInfo>;
143
IsActivationSupported(const TensorInfo & input,const TensorInfo & output,const ActivationDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)144 bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
145 const TensorInfo& output,
146 const ActivationDescriptor& descriptor,
147 Optional<std::string&> reasonIfUnsupported)
148 {
149 TensorInfos infos{input, output};
150
151 return m_LayerSupport->IsLayerSupported(LayerType::Activation,
152 infos,
153 descriptor,
154 EmptyOptional(),
155 EmptyOptional(),
156 reasonIfUnsupported);
157 }
158
IsAdditionSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)159 bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
160 const TensorInfo& input1,
161 const TensorInfo& output,
162 Optional<std::string&> reasonIfUnsupported)
163 {
164 TensorInfos infos{input0, input1, output};
165
166 return m_LayerSupport->IsLayerSupported(LayerType::Addition,
167 infos,
168 BaseDescriptor(),
169 EmptyOptional(),
170 EmptyOptional(),
171 reasonIfUnsupported);
172 }
173
IsArgMinMaxSupported(const TensorInfo & input,const TensorInfo & output,const ArgMinMaxDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)174 bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
175 const TensorInfo& output,
176 const ArgMinMaxDescriptor& descriptor,
177 Optional<std::string&> reasonIfUnsupported)
178 {
179 TensorInfos infos{input, output};
180
181 return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
182 infos,
183 descriptor,
184 EmptyOptional(),
185 EmptyOptional(),
186 reasonIfUnsupported);
187 }
188
IsBatchMatMulSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,const BatchMatMulDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)189 bool LayerSupportHandle::IsBatchMatMulSupported(const TensorInfo& input0,
190 const TensorInfo& input1,
191 const TensorInfo& output,
192 const BatchMatMulDescriptor& descriptor,
193 Optional<std::string&> reasonIfUnsupported)
194 {
195 TensorInfos infos{input0, input1, output};
196
197 return m_LayerSupport->IsLayerSupported(LayerType::BatchMatMul,
198 infos,
199 descriptor,
200 EmptyOptional(),
201 EmptyOptional(),
202 reasonIfUnsupported);
203 }
204
IsBatchNormalizationSupported(const TensorInfo & input,const TensorInfo & output,const TensorInfo & mean,const TensorInfo & var,const TensorInfo & beta,const TensorInfo & gamma,const BatchNormalizationDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)205 bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
206 const TensorInfo& output,
207 const TensorInfo& mean,
208 const TensorInfo& var,
209 const TensorInfo& beta,
210 const TensorInfo& gamma,
211 const BatchNormalizationDescriptor& descriptor,
212 Optional<std::string&> reasonIfUnsupported)
213 {
214 TensorInfos infos{input, output, mean, var, beta, gamma};
215
216 return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
217 infos,
218 descriptor,
219 EmptyOptional(),
220 EmptyOptional(),
221 reasonIfUnsupported);
222 }
223
IsBatchToSpaceNdSupported(const TensorInfo & input,const TensorInfo & output,const BatchToSpaceNdDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)224 bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
225 const TensorInfo& output,
226 const BatchToSpaceNdDescriptor& descriptor,
227 Optional<std::string&> reasonIfUnsupported)
228 {
229 TensorInfos infos{input, output};
230
231 return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
232 infos,
233 descriptor,
234 EmptyOptional(),
235 EmptyOptional(),
236 reasonIfUnsupported);
237 }
238
IsCastSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)239 bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
240 const TensorInfo& output,
241 Optional<std::string&> reasonIfUnsupported)
242 {
243 TensorInfos infos{input, output};
244
245 return m_LayerSupport->IsLayerSupported(LayerType::Cast,
246 infos,
247 BaseDescriptor(),
248 EmptyOptional(),
249 EmptyOptional(),
250 reasonIfUnsupported);
251 }
252
IsChannelShuffleSupported(const TensorInfo & input,const TensorInfo & output,const ChannelShuffleDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)253 bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input,
254 const TensorInfo &output,
255 const ChannelShuffleDescriptor &descriptor,
256 Optional<std::string &> reasonIfUnsupported)
257 {
258 TensorInfos infos{input, output};
259
260 return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
261 infos,
262 descriptor,
263 EmptyOptional(),
264 EmptyOptional(),
265 reasonIfUnsupported);
266 }
267
IsComparisonSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,const ComparisonDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)268 bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
269 const TensorInfo& input1,
270 const TensorInfo& output,
271 const ComparisonDescriptor& descriptor,
272 Optional<std::string&> reasonIfUnsupported)
273 {
274 TensorInfos infos{input0, input1, output};
275
276 return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
277 infos,
278 descriptor,
279 EmptyOptional(),
280 EmptyOptional(),
281 reasonIfUnsupported);
282 }
283
IsConcatSupported(const std::vector<const TensorInfo * > inputs,const TensorInfo & output,const OriginsDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)284 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
285 const TensorInfo& output,
286 const OriginsDescriptor& descriptor,
287 Optional<std::string&> reasonIfUnsupported)
288 {
289 TensorInfos infos;
290 for (const TensorInfo* inputInfo : inputs)
291 {
292 infos.push_back(*inputInfo);
293 }
294 infos.push_back(output);
295
296 return m_LayerSupport->IsLayerSupported(LayerType::Concat,
297 infos,
298 descriptor,
299 EmptyOptional(),
300 EmptyOptional(),
301 reasonIfUnsupported);
302 }
303
IsConstantSupported(const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)304 bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
305 Optional<std::string&> reasonIfUnsupported)
306 {
307 TensorInfos infos{output};
308
309 return m_LayerSupport->IsLayerSupported(LayerType::Constant,
310 infos,
311 BaseDescriptor(),
312 EmptyOptional(),
313 EmptyOptional(),
314 reasonIfUnsupported);
315 }
316
IsConvertFp16ToFp32Supported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)317 bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
318 const TensorInfo& output,
319 Optional<std::string&> reasonIfUnsupported)
320 {
321 TensorInfos infos{input, output};
322
323 return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
324 infos,
325 BaseDescriptor(),
326 EmptyOptional(),
327 EmptyOptional(),
328 reasonIfUnsupported);
329 }
330
IsConvertFp32ToFp16Supported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)331 bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
332 const TensorInfo& output,
333 Optional<std::string&> reasonIfUnsupported)
334 {
335 TensorInfos infos{input, output};
336
337 return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
338 infos,
339 BaseDescriptor(),
340 EmptyOptional(),
341 EmptyOptional(),
342 reasonIfUnsupported);
343 }
344
// Query whether the backend supports a Convolution2d layer for the given tensors/descriptor.
// Before delegating to the backend, this enforces the "NonConstWeights" capability: a backend
// that does not declare it (or declares it false) is rejected when weights or enabled biases
// are non-constant.
bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    // An absent bias is represented by a default-constructed TensorInfo in the infos list.
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    Optional<const BackendOptions::BackendOption> capability ;
    // The capability gate only applies when this handle is bound to a concrete backend.
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            // Backend cannot consume dynamic weights: fail fast with a reason.
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                            "Convolution2d weights are set as dynamic (non constant). ";
                }
                return false;
            }
            // Biases are only checked when the descriptor actually enables them.
            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                            "Convolution2d biases are set as dynamic (non constant). ";
                }
                return false;
            }

            // At the first stage we will only print a warning. this is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}
398
IsConvolution3dSupported(const TensorInfo & input,const TensorInfo & output,const Convolution3dDescriptor & descriptor,const TensorInfo & weights,const Optional<TensorInfo> & biases,Optional<std::string &> reasonIfUnsupported)399 bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
400 const TensorInfo& output,
401 const Convolution3dDescriptor& descriptor,
402 const TensorInfo& weights,
403 const Optional<TensorInfo>& biases,
404 Optional<std::string&> reasonIfUnsupported)
405 {
406 TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
407 TensorInfos infos{input, output, weights, biasesVal};
408
409 return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
410 infos,
411 descriptor,
412 EmptyOptional(),
413 EmptyOptional(),
414 reasonIfUnsupported);
415 }
416
IsDebugSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)417 bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
418 const TensorInfo& output,
419 Optional<std::string&> reasonIfUnsupported)
420 {
421 TensorInfos infos{input, output};
422
423 return m_LayerSupport->IsLayerSupported(LayerType::Debug,
424 infos,
425 BaseDescriptor(),
426 EmptyOptional(),
427 EmptyOptional(),
428 reasonIfUnsupported);
429 }
430
IsDepthToSpaceSupported(const TensorInfo & input,const TensorInfo & output,const DepthToSpaceDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)431 bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
432 const TensorInfo& output,
433 const DepthToSpaceDescriptor& descriptor,
434 Optional<std::string&> reasonIfUnsupported)
435 {
436 TensorInfos infos{input, output};
437
438 return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
439 infos,
440 descriptor,
441 EmptyOptional(),
442 EmptyOptional(),
443 reasonIfUnsupported);
444 }
445
// Query whether the backend supports a DepthwiseConvolution2d layer for the given
// tensors/descriptor. Like Convolution2d, this first enforces the "NonConstWeights"
// capability: without it the weights and enabled biases must be constant.
bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const DepthwiseConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    // An absent bias is represented by a default-constructed TensorInfo in the infos list.
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    Optional<const BackendOptions::BackendOption> capability ;
    // The capability gate only applies when this handle is bound to a concrete backend.
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            // Backend cannot consume dynamic weights: fail fast with a reason.
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                            "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
                }
                return false;
            }
            // Biases are only checked when the descriptor actually enables them.
            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                            "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
                }
                return false;
            }
            // At the first stage we will only print a warning. this is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}
499
IsDequantizeSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)500 bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
501 const TensorInfo& output,
502 Optional<std::string&> reasonIfUnsupported)
503 {
504 TensorInfos infos{input, output};
505
506 return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
507 infos,
508 BaseDescriptor(),
509 EmptyOptional(),
510 EmptyOptional(),
511 reasonIfUnsupported);
512 }
513
IsDetectionPostProcessSupported(const TensorInfo & boxEncodings,const TensorInfo & scores,const TensorInfo & anchors,const TensorInfo & detectionBoxes,const TensorInfo & detectionClasses,const TensorInfo & detectionScores,const TensorInfo & numDetections,const DetectionPostProcessDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)514 bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
515 const TensorInfo& scores,
516 const TensorInfo& anchors,
517 const TensorInfo& detectionBoxes,
518 const TensorInfo& detectionClasses,
519 const TensorInfo& detectionScores,
520 const TensorInfo& numDetections,
521 const DetectionPostProcessDescriptor& descriptor,
522 Optional<std::string&> reasonIfUnsupported)
523 {
524 TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
525
526 return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
527 infos,
528 descriptor,
529 EmptyOptional(),
530 EmptyOptional(),
531 reasonIfUnsupported);
532 }
533
// Query whether the backend supports a dilated depthwise convolution. The same
// "NonConstWeights" capability gate as the other convolution queries applies.
// Note: the final backend query deliberately uses LayerType::DepthwiseConvolution2d —
// dilated depthwise convolution is dispatched through the same layer type.
bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
        const TensorInfo& input,
        const TensorInfo& output,
        const DepthwiseConvolution2dDescriptor& descriptor,
        const TensorInfo& weights,
        const Optional<TensorInfo>& biases,
        Optional<std::string&> reasonIfUnsupported)
{
    // An absent bias is represented by a default-constructed TensorInfo in the infos list.
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    Optional<const BackendOptions::BackendOption> capability ;
    // The capability gate only applies when this handle is bound to a concrete backend.
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            // Backend cannot consume dynamic weights: fail fast with a reason.
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                            "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
                }
                return false;
            }
            // Biases are only checked when the descriptor actually enables them.
            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                            "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
                }
                return false;
            }
            // At the first stage we will only print a warning. this is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}
587
IsDivisionSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)588 bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
589 const TensorInfo& input1,
590 const TensorInfo& output,
591 Optional<std::string&> reasonIfUnsupported)
592 {
593 TensorInfos infos{input0, input1, output};
594
595 return m_LayerSupport->IsLayerSupported(LayerType::Division,
596 infos,
597 BaseDescriptor(),
598 EmptyOptional(),
599 EmptyOptional(),
600 reasonIfUnsupported);
601 }
602
IsElementwiseBinarySupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,const ElementwiseBinaryDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)603 bool LayerSupportHandle::IsElementwiseBinarySupported(const TensorInfo &input0,
604 const TensorInfo &input1,
605 const TensorInfo &output,
606 const ElementwiseBinaryDescriptor &descriptor,
607 Optional<std::string &> reasonIfUnsupported)
608 {
609 TensorInfos infos{input0, input1, output};
610
611 return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
612 infos,
613 descriptor,
614 EmptyOptional(),
615 EmptyOptional(),
616 reasonIfUnsupported);
617 }
618
IsElementwiseUnarySupported(const TensorInfo & input,const TensorInfo & output,const ElementwiseUnaryDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)619 bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
620 const TensorInfo& output,
621 const ElementwiseUnaryDescriptor& descriptor,
622 Optional<std::string&> reasonIfUnsupported)
623 {
624 TensorInfos infos{input, output};
625
626 return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
627 infos,
628 descriptor,
629 EmptyOptional(),
630 EmptyOptional(),
631 reasonIfUnsupported);
632 }
633
IsFakeQuantizationSupported(const TensorInfo & input,const FakeQuantizationDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)634 bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
635 const FakeQuantizationDescriptor& descriptor,
636 Optional<std::string&> reasonIfUnsupported)
637 {
638 TensorInfos infos{input};
639
640 return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
641 infos,
642 descriptor,
643 EmptyOptional(),
644 EmptyOptional(),
645 reasonIfUnsupported);
646 }
647
IsFillSupported(const TensorInfo & input,const TensorInfo & output,const FillDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)648 bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
649 const TensorInfo& output,
650 const FillDescriptor& descriptor,
651 Optional<std::string&> reasonIfUnsupported)
652 {
653 TensorInfos infos{input, output};
654
655 return m_LayerSupport->IsLayerSupported(LayerType::Fill,
656 infos,
657 descriptor,
658 EmptyOptional(),
659 EmptyOptional(),
660 reasonIfUnsupported);
661 }
662
IsFloorSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)663 bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
664 const TensorInfo& output,
665 Optional<std::string&> reasonIfUnsupported)
666 {
667 TensorInfos infos{input, output};
668
669 return m_LayerSupport->IsLayerSupported(LayerType::Floor,
670 infos,
671 BaseDescriptor(),
672 EmptyOptional(),
673 EmptyOptional(),
674 reasonIfUnsupported);
675 }
676
// Query whether the backend supports a FullyConnected layer for the given tensors/descriptor.
// When the backend lacks the "NonConstWeights" capability, three conditions are enforced:
// the descriptor must declare constant weights, the weights tensor must be constant, and an
// enabled bias tensor must be constant.
bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& weights,
                                                   const TensorInfo& biases,
                                                   const FullyConnectedDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    TensorInfos infos{input, output, weights, biases};

    Optional<const BackendOptions::BackendOption> capability;
    // The capability gate only applies when this handle is bound to a concrete backend.
    if (!m_BackendId.IsUndefined())
    {
        capability = GetCapability("NonConstWeights", m_BackendId);
        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
        {
            // The descriptor itself advertises dynamic weights: reject.
            if (!descriptor.m_ConstantWeights)
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                            "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
                }
                return false;
            }
            // The weights tensor is dynamic: reject.
            if (!weights.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
                            "FullyConnected weights are set as dynamic (non constant). ";
                }

                return false;
            }
            // Biases are only checked when the descriptor actually enables them.
            if (descriptor.m_BiasEnabled && !biases.IsConstant())
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() =
                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
                            "FullyConnected biases are set as dynamic (non constant). ";
                }
                return false;
            }

            // At the first stage we will only print a warning. this is to give
            // backend developers a chance to adopt and read weights from input slots.
            ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
                                  "If you are a backend developer please find more information in our "
                                  "doxygen documentation on github https://github.com/ARM-software/armnn "
                                  "under the keyword 'ConstTensorsAsInputs'.";
        }
    }

    return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}
740
IsGatherSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,const GatherDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)741 bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
742 const TensorInfo& input1,
743 const TensorInfo& output,
744 const GatherDescriptor& descriptor,
745 Optional<std::string&> reasonIfUnsupported)
746 {
747 TensorInfos infos{input0, input1, output};
748
749 return m_LayerSupport->IsLayerSupported(LayerType::Gather,
750 infos,
751 descriptor,
752 EmptyOptional(),
753 EmptyOptional(),
754 reasonIfUnsupported);
755 }
756
IsGatherNdSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)757 bool LayerSupportHandle::IsGatherNdSupported(const TensorInfo& input0,
758 const TensorInfo& input1,
759 const TensorInfo& output,
760 Optional<std::string&> reasonIfUnsupported)
761 {
762 TensorInfos infos{input0, input1, output};
763
764 return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
765 infos,
766 BaseDescriptor(),
767 EmptyOptional(),
768 EmptyOptional(),
769 reasonIfUnsupported);
770 }
771
IsInputSupported(const TensorInfo & input,Optional<std::string &> reasonIfUnsupported)772 bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
773 Optional<std::string&> reasonIfUnsupported)
774 {
775 TensorInfos infos{input};
776
777 return m_LayerSupport->IsLayerSupported(LayerType::Input,
778 infos,
779 BaseDescriptor(),
780 EmptyOptional(),
781 EmptyOptional(),
782 reasonIfUnsupported);
783 }
784
IsInstanceNormalizationSupported(const TensorInfo & input,const TensorInfo & output,const InstanceNormalizationDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)785 bool LayerSupportHandle::IsInstanceNormalizationSupported(
786 const TensorInfo& input,
787 const TensorInfo& output,
788 const InstanceNormalizationDescriptor& descriptor,
789 Optional<std::string&> reasonIfUnsupported)
790 {
791 TensorInfos infos{input, output};
792
793 return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
794 infos,
795 descriptor,
796 EmptyOptional(),
797 EmptyOptional(),
798 reasonIfUnsupported);
799 }
800
IsL2NormalizationSupported(const TensorInfo & input,const TensorInfo & output,const L2NormalizationDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)801 bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
802 const TensorInfo& output,
803 const L2NormalizationDescriptor& descriptor,
804 Optional<std::string&> reasonIfUnsupported)
805 {
806 TensorInfos infos{input, output};
807
808 return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
809 infos,
810 descriptor,
811 EmptyOptional(),
812 EmptyOptional(),
813 reasonIfUnsupported);
814 }
815
IsLogicalBinarySupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,const LogicalBinaryDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)816 bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
817 const TensorInfo& input1,
818 const TensorInfo& output,
819 const LogicalBinaryDescriptor& descriptor,
820 Optional<std::string&> reasonIfUnsupported)
821 {
822 TensorInfos infos{input0, input1, output};
823
824 return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
825 infos,
826 descriptor,
827 EmptyOptional(),
828 EmptyOptional(),
829 reasonIfUnsupported);
830 }
831
IsLogicalUnarySupported(const TensorInfo & input,const TensorInfo & output,const ElementwiseUnaryDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)832 bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
833 const TensorInfo& output,
834 const ElementwiseUnaryDescriptor& descriptor,
835 Optional<std::string&> reasonIfUnsupported)
836 {
837 TensorInfos infos{input, output};
838
839 return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
840 infos,
841 descriptor,
842 EmptyOptional(),
843 EmptyOptional(),
844 reasonIfUnsupported);
845 }
846
IsLogSoftmaxSupported(const TensorInfo & input,const TensorInfo & output,const LogSoftmaxDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)847 bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
848 const TensorInfo& output,
849 const LogSoftmaxDescriptor& descriptor,
850 Optional<std::string&> reasonIfUnsupported)
851 {
852 TensorInfos infos{input, output};
853
854 return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
855 infos,
856 descriptor,
857 EmptyOptional(),
858 EmptyOptional(),
859 reasonIfUnsupported);
860 }
861
IsLstmSupported(const TensorInfo & input,const TensorInfo & outputStateIn,const TensorInfo & cellStateIn,const TensorInfo & scratchBuffer,const TensorInfo & outputStateOut,const TensorInfo & cellStateOut,const TensorInfo & output,const LstmDescriptor & descriptor,const LstmInputParamsInfo & paramsInfo,Optional<std::string &> reasonIfUnsupported)862 bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
863 const TensorInfo& outputStateIn,
864 const TensorInfo& cellStateIn,
865 const TensorInfo& scratchBuffer,
866 const TensorInfo& outputStateOut,
867 const TensorInfo& cellStateOut,
868 const TensorInfo& output,
869 const LstmDescriptor& descriptor,
870 const LstmInputParamsInfo& paramsInfo,
871 Optional<std::string&> reasonIfUnsupported)
872 {
873 TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
874
875 return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
876 infos,
877 descriptor,
878 paramsInfo,
879 EmptyOptional(),
880 reasonIfUnsupported);
881 }
882
IsMaximumSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)883 bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
884 const TensorInfo& input1,
885 const TensorInfo& output,
886 Optional<std::string&> reasonIfUnsupported)
887 {
888 TensorInfos infos{input0, input1, output};
889
890 return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
891 infos,
892 BaseDescriptor(),
893 EmptyOptional(),
894 EmptyOptional(),
895 reasonIfUnsupported);
896 }
897
IsMeanSupported(const TensorInfo & input,const TensorInfo & output,const MeanDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)898 bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
899 const TensorInfo& output,
900 const MeanDescriptor& descriptor,
901 Optional<std::string&> reasonIfUnsupported)
902 {
903 TensorInfos infos{input, output};
904
905 return m_LayerSupport->IsLayerSupported(LayerType::Mean,
906 infos,
907 descriptor,
908 EmptyOptional(),
909 EmptyOptional(),
910 reasonIfUnsupported);
911 }
912
IsMemCopySupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)913 bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
914 const TensorInfo& output,
915 Optional<std::string&> reasonIfUnsupported)
916 {
917 TensorInfos infos{input, output};
918
919 return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
920 infos,
921 BaseDescriptor(),
922 EmptyOptional(),
923 EmptyOptional(),
924 reasonIfUnsupported);
925 }
926
IsMemImportSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)927 bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
928 const TensorInfo& output,
929 Optional<std::string&> reasonIfUnsupported)
930 {
931 TensorInfos infos{input, output};
932
933 return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
934 infos,
935 BaseDescriptor(),
936 EmptyOptional(),
937 EmptyOptional(),
938 reasonIfUnsupported);
939 }
940
IsMergeSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)941 bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
942 const TensorInfo& input1,
943 const TensorInfo& output,
944 Optional<std::string&> reasonIfUnsupported)
945 {
946 TensorInfos infos{input0, input1, output};
947
948 return m_LayerSupport->IsLayerSupported(LayerType::Merge,
949 infos,
950 BaseDescriptor(),
951 EmptyOptional(),
952 EmptyOptional(),
953 reasonIfUnsupported);
954 }
955
IsMinimumSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)956 bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
957 const TensorInfo& input1,
958 const TensorInfo& output,
959 Optional<std::string&> reasonIfUnsupported)
960 {
961 TensorInfos infos{input0, input1, output};
962
963 return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
964 infos,
965 BaseDescriptor(),
966 EmptyOptional(),
967 EmptyOptional(),
968 reasonIfUnsupported);
969 }
970
IsMultiplicationSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)971 bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
972 const TensorInfo& input1,
973 const TensorInfo& output,
974 Optional<std::string&> reasonIfUnsupported)
975 {
976 TensorInfos infos{input0, input1, output};
977
978 return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
979 infos,
980 BaseDescriptor(),
981 EmptyOptional(),
982 EmptyOptional(),
983 reasonIfUnsupported);
984 }
985
IsNormalizationSupported(const TensorInfo & input,const TensorInfo & output,const NormalizationDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)986 bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
987 const TensorInfo& output,
988 const NormalizationDescriptor& descriptor,
989 Optional<std::string&> reasonIfUnsupported)
990 {
991 TensorInfos infos{input, output};
992
993 return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
994 infos,
995 descriptor,
996 EmptyOptional(),
997 EmptyOptional(),
998 reasonIfUnsupported);
999 }
1000
IsOutputSupported(const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)1001 bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
1002 Optional<std::string&> reasonIfUnsupported)
1003 {
1004 TensorInfos infos{output};
1005
1006 return m_LayerSupport->IsLayerSupported(LayerType::Output,
1007 infos,
1008 BaseDescriptor(),
1009 EmptyOptional(),
1010 EmptyOptional(),
1011 reasonIfUnsupported);
1012 }
1013
IsPadSupported(const TensorInfo & input,const TensorInfo & output,const PadDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1014 bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
1015 const TensorInfo& output,
1016 const PadDescriptor& descriptor,
1017 Optional<std::string&> reasonIfUnsupported)
1018 {
1019 TensorInfos infos{input, output};
1020
1021 return m_LayerSupport->IsLayerSupported(LayerType::Pad,
1022 infos,
1023 descriptor,
1024 EmptyOptional(),
1025 EmptyOptional(),
1026 reasonIfUnsupported);
1027 }
1028
IsPermuteSupported(const TensorInfo & input,const TensorInfo & output,const PermuteDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1029 bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
1030 const TensorInfo& output,
1031 const PermuteDescriptor& descriptor,
1032 Optional<std::string&> reasonIfUnsupported)
1033 {
1034 TensorInfos infos{input, output};
1035
1036 return m_LayerSupport->IsLayerSupported(LayerType::Permute,
1037 infos,
1038 descriptor,
1039 EmptyOptional(),
1040 EmptyOptional(),
1041 reasonIfUnsupported);
1042 }
1043
IsPooling2dSupported(const TensorInfo & input,const TensorInfo & output,const Pooling2dDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1044 bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
1045 const TensorInfo& output,
1046 const Pooling2dDescriptor& descriptor,
1047 Optional<std::string&> reasonIfUnsupported)
1048 {
1049 TensorInfos infos{input, output};
1050
1051 return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
1052 infos,
1053 descriptor,
1054 EmptyOptional(),
1055 EmptyOptional(),
1056 reasonIfUnsupported);
1057 }
1058
IsPooling3dSupported(const TensorInfo & input,const TensorInfo & output,const Pooling3dDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1059 bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
1060 const TensorInfo& output,
1061 const Pooling3dDescriptor& descriptor,
1062 Optional<std::string&> reasonIfUnsupported)
1063 {
1064 TensorInfos infos{input, output};
1065
1066 return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
1067 infos,
1068 descriptor,
1069 EmptyOptional(),
1070 EmptyOptional(),
1071 reasonIfUnsupported);
1072 }
1073
IsPreCompiledSupported(const TensorInfo & input,const PreCompiledDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1074 bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
1075 const PreCompiledDescriptor& descriptor,
1076 Optional<std::string&> reasonIfUnsupported)
1077 {
1078 TensorInfos infos{input};
1079
1080 return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
1081 infos,
1082 descriptor,
1083 EmptyOptional(),
1084 EmptyOptional(),
1085 reasonIfUnsupported);
1086 }
1087
IsPreluSupported(const TensorInfo & input,const TensorInfo & alpha,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)1088 bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
1089 const TensorInfo& alpha,
1090 const TensorInfo& output,
1091 Optional<std::string&> reasonIfUnsupported)
1092 {
1093 TensorInfos infos{input, alpha, output};
1094
1095 return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
1096 infos,
1097 BaseDescriptor(),
1098 EmptyOptional(),
1099 EmptyOptional(),
1100 reasonIfUnsupported);
1101 }
1102
IsQuantizeSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)1103 bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
1104 const TensorInfo& output,
1105 Optional<std::string&> reasonIfUnsupported)
1106 {
1107 TensorInfos infos{input, output};
1108
1109 return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
1110 infos,
1111 BaseDescriptor(),
1112 EmptyOptional(),
1113 EmptyOptional(),
1114 reasonIfUnsupported);
1115 }
1116
IsQLstmSupported(const TensorInfo & input,const TensorInfo & previousOutputIn,const TensorInfo & previousCellStateIn,const TensorInfo & outputStateOut,const TensorInfo & cellStateOut,const TensorInfo & output,const QLstmDescriptor & descriptor,const LstmInputParamsInfo & paramsInfo,Optional<std::string &> reasonIfUnsupported)1117 bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
1118 const TensorInfo& previousOutputIn,
1119 const TensorInfo& previousCellStateIn,
1120 const TensorInfo& outputStateOut,
1121 const TensorInfo& cellStateOut,
1122 const TensorInfo& output,
1123 const QLstmDescriptor& descriptor,
1124 const LstmInputParamsInfo& paramsInfo,
1125 Optional<std::string&> reasonIfUnsupported)
1126 {
1127 TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1128
1129 return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
1130 infos,
1131 descriptor,
1132 paramsInfo,
1133 EmptyOptional(),
1134 reasonIfUnsupported);
1135 }
1136
IsQuantizedLstmSupported(const TensorInfo & input,const TensorInfo & previousCellStateIn,const TensorInfo & previousOutputIn,const TensorInfo & cellStateOut,const TensorInfo & output,const QuantizedLstmInputParamsInfo & paramsInfo,Optional<std::string &> reasonIfUnsupported)1137 bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
1138 const TensorInfo& previousCellStateIn,
1139 const TensorInfo& previousOutputIn,
1140 const TensorInfo& cellStateOut,
1141 const TensorInfo& output,
1142 const QuantizedLstmInputParamsInfo& paramsInfo,
1143 Optional<std::string&> reasonIfUnsupported)
1144 {
1145 TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
1146
1147 return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
1148 infos,
1149 BaseDescriptor(),
1150 EmptyOptional(),
1151 paramsInfo,
1152 reasonIfUnsupported);
1153 }
1154
IsRankSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)1155 bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
1156 const TensorInfo& output,
1157 Optional<std::string&> reasonIfUnsupported)
1158 {
1159 TensorInfos infos{input, output};
1160
1161 return m_LayerSupport->IsLayerSupported(LayerType::Rank,
1162 infos,
1163 BaseDescriptor(),
1164 EmptyOptional(),
1165 EmptyOptional(),
1166 reasonIfUnsupported);
1167 }
1168
IsReduceSupported(const TensorInfo & input,const TensorInfo & output,const ReduceDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1169 bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
1170 const TensorInfo& output,
1171 const ReduceDescriptor& descriptor,
1172 Optional<std::string&> reasonIfUnsupported)
1173 {
1174 TensorInfos infos{input, output};
1175
1176 return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
1177 infos,
1178 descriptor,
1179 EmptyOptional(),
1180 EmptyOptional(),
1181 reasonIfUnsupported);
1182 }
1183
IsReshapeSupported(const TensorInfo & input,const TensorInfo & output,const ReshapeDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1184 bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
1185 const TensorInfo& output,
1186 const ReshapeDescriptor& descriptor,
1187 Optional<std::string&> reasonIfUnsupported)
1188 {
1189 TensorInfos infos{input, output};
1190
1191 return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
1192 infos,
1193 descriptor,
1194 EmptyOptional(),
1195 EmptyOptional(),
1196 reasonIfUnsupported);
1197 }
1198
IsResizeSupported(const TensorInfo & input,const TensorInfo & output,const ResizeDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1199 bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
1200 const TensorInfo& output,
1201 const ResizeDescriptor& descriptor,
1202 Optional<std::string&> reasonIfUnsupported)
1203 {
1204 TensorInfos infos{input, output};
1205
1206 return m_LayerSupport->IsLayerSupported(LayerType::Resize,
1207 infos,
1208 descriptor,
1209 EmptyOptional(),
1210 EmptyOptional(),
1211 reasonIfUnsupported);
1212 }
1213
IsShapeSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)1214 bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
1215 const TensorInfo& output,
1216 Optional<std::string&> reasonIfUnsupported)
1217 {
1218 TensorInfos infos{input, output};
1219
1220 return m_LayerSupport->IsLayerSupported(LayerType::Shape,
1221 infos,
1222 BaseDescriptor(),
1223 EmptyOptional(),
1224 EmptyOptional(),
1225 reasonIfUnsupported);
1226 }
1227
IsSliceSupported(const TensorInfo & input,const TensorInfo & output,const SliceDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1228 bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
1229 const TensorInfo& output,
1230 const SliceDescriptor& descriptor,
1231 Optional<std::string&> reasonIfUnsupported)
1232 {
1233 TensorInfos infos{input, output};
1234
1235 return m_LayerSupport->IsLayerSupported(LayerType::Slice,
1236 infos,
1237 descriptor,
1238 EmptyOptional(),
1239 EmptyOptional(),
1240 reasonIfUnsupported);
1241 }
1242
IsSoftmaxSupported(const TensorInfo & input,const TensorInfo & output,const SoftmaxDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1243 bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
1244 const TensorInfo& output,
1245 const SoftmaxDescriptor& descriptor,
1246 Optional<std::string&> reasonIfUnsupported)
1247 {
1248 TensorInfos infos{input, output};
1249
1250 return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
1251 infos,
1252 descriptor,
1253 EmptyOptional(),
1254 EmptyOptional(),
1255 reasonIfUnsupported);
1256 }
1257
IsSpaceToBatchNdSupported(const TensorInfo & input,const TensorInfo & output,const SpaceToBatchNdDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1258 bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
1259 const TensorInfo& output,
1260 const SpaceToBatchNdDescriptor& descriptor,
1261 Optional<std::string&> reasonIfUnsupported)
1262 {
1263 TensorInfos infos{input, output};
1264
1265 return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
1266 infos,
1267 descriptor,
1268 EmptyOptional(),
1269 EmptyOptional(),
1270 reasonIfUnsupported);
1271 }
1272
IsSpaceToDepthSupported(const TensorInfo & input,const TensorInfo & output,const SpaceToDepthDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1273 bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
1274 const TensorInfo& output,
1275 const SpaceToDepthDescriptor& descriptor,
1276 Optional<std::string&> reasonIfUnsupported)
1277 {
1278 TensorInfos infos{input, output};
1279
1280 return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
1281 infos,
1282 descriptor,
1283 EmptyOptional(),
1284 EmptyOptional(),
1285 reasonIfUnsupported);
1286 }
1287
IsSplitterSupported(const TensorInfo & input,const std::vector<std::reference_wrapper<TensorInfo>> & outputs,const ViewsDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1288 bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
1289 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1290 const ViewsDescriptor& descriptor,
1291 Optional<std::string&> reasonIfUnsupported)
1292 {
1293 TensorInfos infos{input};
1294 for (TensorInfo outInfo : outputs)
1295 {
1296 infos.push_back(outInfo);
1297 }
1298
1299 return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
1300 infos,
1301 descriptor,
1302 EmptyOptional(),
1303 EmptyOptional(),
1304 reasonIfUnsupported);
1305 }
1306
IsStackSupported(const std::vector<const TensorInfo * > & inputs,const TensorInfo & output,const StackDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1307 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1308 const TensorInfo& output,
1309 const StackDescriptor& descriptor,
1310 Optional<std::string&> reasonIfUnsupported)
1311 {
1312 TensorInfos infos;
1313 for (const TensorInfo* inputInfo : inputs)
1314 {
1315 infos.push_back(*inputInfo);
1316 }
1317 infos.push_back(output);
1318
1319 return m_LayerSupport->IsLayerSupported(LayerType::Stack,
1320 infos,
1321 descriptor,
1322 EmptyOptional(),
1323 EmptyOptional(),
1324 reasonIfUnsupported);
1325 }
1326
IsStandInSupported(const std::vector<const TensorInfo * > & inputs,const std::vector<const TensorInfo * > & outputs,const StandInDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1327 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
1328 const std::vector<const TensorInfo*>& outputs,
1329 const StandInDescriptor& descriptor,
1330 Optional<std::string&> reasonIfUnsupported)
1331 {
1332 TensorInfos infos;
1333 for (const TensorInfo* inputInfo : inputs)
1334 {
1335 infos.push_back(*inputInfo);
1336 }
1337 for (const TensorInfo* outputInfo : outputs)
1338 {
1339 infos.push_back(*outputInfo);
1340 }
1341
1342 return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
1343 infos,
1344 descriptor,
1345 EmptyOptional(),
1346 EmptyOptional(),
1347 reasonIfUnsupported);
1348 }
1349
1350
IsStridedSliceSupported(const TensorInfo & input,const TensorInfo & output,const StridedSliceDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1351 bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
1352 const TensorInfo& output,
1353 const StridedSliceDescriptor& descriptor,
1354 Optional<std::string&> reasonIfUnsupported)
1355 {
1356 TensorInfos infos{input, output};
1357
1358 return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
1359 infos,
1360 descriptor,
1361 EmptyOptional(),
1362 EmptyOptional(),
1363 reasonIfUnsupported);
1364 }
1365
IsSubtractionSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported)1366 bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
1367 const TensorInfo& input1,
1368 const TensorInfo& output,
1369 Optional<std::string&> reasonIfUnsupported)
1370 {
1371 TensorInfos infos{input0, input1, output};
1372
1373 return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
1374 infos,
1375 BaseDescriptor(),
1376 EmptyOptional(),
1377 EmptyOptional(),
1378 reasonIfUnsupported);
1379 }
1380
IsSwitchSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output0,const TensorInfo & output1,Optional<std::string &> reasonIfUnsupported)1381 bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
1382 const TensorInfo& input1,
1383 const TensorInfo& output0,
1384 const TensorInfo& output1,
1385 Optional<std::string&> reasonIfUnsupported)
1386 {
1387 TensorInfos infos{input0, input1, output0, output1};
1388
1389 return m_LayerSupport->IsLayerSupported(LayerType::Switch,
1390 infos,
1391 BaseDescriptor(),
1392 EmptyOptional(),
1393 EmptyOptional(),
1394 reasonIfUnsupported);
1395 }
1396
IsTransposeConvolution2dSupported(const TensorInfo & input,const TensorInfo & output,const TransposeConvolution2dDescriptor & descriptor,const TensorInfo & weights,const Optional<TensorInfo> & biases,Optional<std::string &> reasonIfUnsupported)1397 bool LayerSupportHandle::IsTransposeConvolution2dSupported(
1398 const TensorInfo& input,
1399 const TensorInfo& output,
1400 const TransposeConvolution2dDescriptor& descriptor,
1401 const TensorInfo& weights,
1402 const Optional<TensorInfo>& biases,
1403 Optional<std::string&> reasonIfUnsupported)
1404 {
1405 TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
1406 TensorInfos infos{input, output, weights, biasesVal};
1407
1408 return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
1409 infos,
1410 descriptor,
1411 EmptyOptional(),
1412 EmptyOptional(),
1413 reasonIfUnsupported);
1414 }
1415
IsTransposeSupported(const TensorInfo & input,const TensorInfo & output,const TransposeDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported)1416 bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
1417 const TensorInfo& output,
1418 const TransposeDescriptor& descriptor,
1419 Optional<std::string&> reasonIfUnsupported)
1420 {
1421 TensorInfos infos{input, output};
1422
1423 return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
1424 infos,
1425 descriptor,
1426 EmptyOptional(),
1427 EmptyOptional(),
1428 reasonIfUnsupported);
1429 }
1430
IsUnidirectionalSequenceLstmSupported(const TensorInfo & input,const TensorInfo & outputStateIn,const TensorInfo & cellStateIn,const TensorInfo & outputStateOut,const TensorInfo & cellStateOut,const TensorInfo & output,const LstmDescriptor & descriptor,const LstmInputParamsInfo & paramsInfo,Optional<std::string &> reasonIfUnsupported)1431 bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
1432 const TensorInfo& outputStateIn,
1433 const TensorInfo& cellStateIn,
1434 const TensorInfo& outputStateOut,
1435 const TensorInfo& cellStateOut,
1436 const TensorInfo& output,
1437 const LstmDescriptor& descriptor,
1438 const LstmInputParamsInfo& paramsInfo,
1439 Optional<std::string&> reasonIfUnsupported)
1440 {
1441 TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};
1442
1443 return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
1444 infos,
1445 descriptor,
1446 paramsInfo,
1447 EmptyOptional(),
1448 reasonIfUnsupported);
1449 }
1450
1451 }