//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>
#include <DelegateUtils.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/c/c_api_opaque.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>
namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", \
                                      opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

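// Typical usage of FORWARD_LAYER_OPAQUE_SUPPORT_FUNC, adapted from FusedActivation below
// (inputInfo/outputInfo/activationDesc stand in for the operator's tensor infos and descriptor):
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION", tfLiteContext, IsActivationSupported,
//                                       data.m_Backends, isSupported, setBackend,
//                                       inputInfo, outputInfo, activationDesc);

// Checks that the node has the expected number of input tensors; logs an error and returns
// kTfLiteError otherwise.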
TfLiteStatus ValidateNumInputs(TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    int numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of inputs (%d != %d) in node #%d",
                numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

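// Checks that the node has the expected number of output tensors; logs an error and returns
// kTfLiteError otherwise.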
TfLiteStatus ValidateNumOutputs(TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = TfLiteOpaqueNodeNumberOfOutputs(tfLiteNode);
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (%d != %d) in node #%d",
                numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

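// A tensor is considered constant if its data is memory-mapped read-only (kTfLiteMmapRo).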
bool IsConstantTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return TfLiteOpaqueTensorGetAllocationType(tfLiteTensor) == kTfLiteMmapRo;
}

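// A tensor is dynamic if its allocation type is kTfLiteDynamic, i.e. it may be resized at runtime.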
bool IsDynamicTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return TfLiteOpaqueTensorGetAllocationType(tfLiteTensor) == kTfLiteDynamic;
}

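// A tensor handle is valid if it is not null.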
bool IsValid(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

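// Validates a tensor for use by the delegate: it must be non-null and must not be dynamic.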
bool IsValid(TfLiteOpaqueContext* tfLiteContext,
             const TfLiteOpaqueTensor* tfLiteTensor,
             int32_t operatorCode,
             int32_t nodeIndex)
{
    if (!IsValid(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return false;
    }
    return true;
}

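// Returns true if the tensor carries kTfLiteAffineQuantization parameters
// (per-tensor scale/zero-point or per-channel scales).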
bool IsAffineQuantization(const TfLiteOpaqueTensor& tfLiteTensor)
{
    return TfLiteOpaqueTensorGetQuantization(&tfLiteTensor).type == kTfLiteAffineQuantization;
}

// Connects the layer into the graph: wires previously registered output slots to the layer's
// input slots, and registers the layer's own output slots for the node's outputs.
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteOpaqueContext* tfLiteContext,
                     TfLiteOpaqueNode* tfLiteNode,
                     armnnOpaqueDelegate::DelegateData& data)
{
    // TfLiteOpaqueNodeInputs sets inputIndexArray to an int array of tensor indices,
    // one for each input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    if (numInputs != static_cast<int>(layer->GetNumInputSlots()))
    {
        ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of input slots does not match actual "
                                                             "number of input slots.";
        return kTfLiteError;
    }
    // Connect the input slots.
    // For each input slot, look up the output slot registered for the tensor allocated to it.
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[inputIndexArray[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[inputIndexArray[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // TfLiteOpaqueNodeOutputs sets outputIndexArray to an int array of tensor indices,
    // one for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    if (numOutputs != static_cast<int>(layer->GetNumOutputSlots()))
    {
        ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of output slots does not match actual "
                                                             "number of output slots.";
        return kTfLiteError;
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

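// Adds an activation layer for the given TfLiteFusedActivation (if any) after prevLayer's output
// slot and rewires the node's output slots to the new activation layer.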
TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
                             TfLiteOpaqueNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnOpaqueDelegate::DelegateData& data)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActReluN1To1:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
                                      tfLiteContext,
                                      IsActivationSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      activationOutputInfo,
                                      activationOutputInfo,
                                      activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->SetBackendId(setBackend);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // TfLiteOpaqueNodeOutputs sets outputIndexArray to an int array of tensor indices,
    // one for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    TfLiteStatus outputStatus = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs);
    if (outputStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndexArray[outputIndex])]->Connect(activationLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

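// Appends a reshape layer after prevLayer, checks backend support for it, and rewires the node's
// output slots to the reshape layer. Returns nullptr if no backend supports the reshape.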
armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          armnn::IConnectableLayer* prevLayer,
                                          armnn::TensorInfo reshapedOutputTensorInfo,
                                          armnn::TensorInfo outputTensorInfo,
                                          armnnOpaqueDelegate::DelegateData& data)
{
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
                                      tfLiteContext,
                                      IsReshapeSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      reshapedOutputTensorInfo,
                                      outputTensorInfo,
                                      desc);

    if (!isSupported)
    {
        return nullptr;
    }

    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->SetBackendId(setBackend);

    prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Gather the array of output indices and its length; replaces node->outputs->data[i]
    const int* outputIndices = nullptr;
    int numOutputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndices, &numOutputs);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather output information from node.");
    }

    if (static_cast<unsigned int>(numOutputs) != reshapeLayer->GetNumOutputSlots())
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (" +
                               std::to_string(numOutputs) +
                               " != " +
                               std::to_string(reshapeLayer->GetNumOutputSlots()) +
                               ") in node.");
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndices[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
    }
    return reshapeLayer;
}

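// Maps a TfLite tensor type to the corresponding armnn::DataType. For kTfLiteInt8 the quantization
// parameters decide between QAsymmS8 (single zero point) and QSymmS8 (symmetric/per-channel).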
armnn::DataType GetDataType(const TfLiteOpaqueTensor* tfLiteTensor)
{
    switch (TfLiteOpaqueTensorType(tfLiteTensor))
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                        reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);

                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(static_cast<int>(TfLiteOpaqueTensorType(tfLiteTensor))));
    }
}

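// Builds an armnn::TensorInfo (shape, data type, constness and quantization parameters) from an
// opaque TfLite tensor. Zero-sized dimensions are marked as unspecified; shapeless input tensors
// are treated as 1D tensors of size one.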
armnn::TensorInfo GetTensorInfoForTfLiteOpaqueTensor(const TfLiteOpaqueTensor* tfLiteTensor, bool isOutput = false)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;

    auto tensorDimensionSize = TfLiteOpaqueTensorNumDims(tfLiteTensor);
    if (tensorDimensionSize == 0)
    {
        // If the input tensor does not have a shape, assume it is a 1D tensor of size one.
        if (!isOutput)
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };

            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);

            if (IsConstantTensor(tfLiteTensor))
            {
                ret.SetConstant(true);
            }
        }
        else
        {
            // Shapeless output tensors get a shape of unspecified dimensionality.
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
        // Note: ArmNN supports at most 5 tensor dimensions (armnn::MaxNumOfTensorDimensions).
        bool dimensionsSpecificity[5] = { true, true, true, true, true };

        for (int32_t i = 0; i < tensorDimensionSize; ++i)
        {
            int32_t dim = TfLiteOpaqueTensorDim(tfLiteTensor, i);

            // A zero-sized dimension means the size is not specified.
            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }

        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
                                       tensorDims.data(),
                                       dimensionsSpecificity);

        ret = armnn::TensorInfo(tensorShape, type);
        if (IsConstantTensor(tfLiteTensor))
        {
            ret.SetConstant(true);
        }
    }

    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // Get per-channel quantization parameters.
        const auto* affineQuantization =
                reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = TfLiteOpaqueTensorGetQuantizationParams(tfLiteTensor);
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

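// Wraps the tensor's read-only data in an armnn::ConstTensor; throws if the tensor's allocation
// type is not kTfLiteMmapRo.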
armnn::ConstTensor CreateConstTensor(const TfLiteOpaqueTensor* tfLiteTensor,
                                     const armnn::TensorInfo& tensorInfo)
{
    auto allocType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    if (allocType != kTfLiteMmapRo)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(allocType));
    }

    return armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

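// Creates a ConstTensor for the node's input at the given index. Note that the returned object is
// heap-allocated and ownership passes to the caller.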
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteOpaqueContext* tfLiteContext,
                                                  TfLiteOpaqueNode* tfLiteNode,
                                                  int index)
{
    const TfLiteOpaqueTensor* tfLiteTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, index);
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteTensor);

    return new armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

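// Returns true if the node's optional input at operandIndex is present, i.e. the inputs array has
// an entry at that index and the entry is non-negative.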
bool IsOptionalOperandPresent(TfLiteOpaqueNode* tfLiteNode, const int operandIndex)
{
    // TfLiteOpaqueNodeInputs sets inputIndexArray to an int array of tensor indices,
    // one for each input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // The optional input is present only if the inputs array has an entry at operandIndex and that
    // entry is non-negative (TfLite uses -1 for omitted optional inputs).
    return numInputs > operandIndex && inputIndexArray[operandIndex] >= 0;
}

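// Creates constant layers for any constant input tensors of the node and registers their output
// slots, so that Connect() can later wire them into the graph.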
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnOpaqueDelegate::DelegateData& delegateData,
                           TfLiteOpaqueContext* tfLiteContext,
                           TfLiteOpaqueNode* tfLiteNode)
{
    // TfLiteOpaqueNodeInputs sets inputIndexArray to an int array of tensor indices,
    // one for each input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // Process input tensors.
    // If an input tensor is a constant tensor, create a constant layer and connect it to the network.
    for (int32_t inputIndex = 0; inputIndex < static_cast<int32_t>(layer->GetNumInputSlots()); ++inputIndex)
    {
        const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, inputIndex);

        if (IsConstantTensor(tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONSTANT",
                                              tfLiteContext,
                                              IsConstantSupported,
                                              delegateData.m_Backends,
                                              isSupported,
                                              setBackend,
                                              inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }

            auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[inputIndexArray[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

} // namespace anonymous