///
/// Copyright (c) 2021-2022 Arm Limited.
///
/// SPDX-License-Identifier: MIT
///
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to
/// deal in the Software without restriction, including without limitation the
/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
/// sell copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in all
/// copies or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
///
namespace arm_compute
{
/**
@page operators_list Supported Operators

@tableofcontents

@section S9_1_operators_list Supported Operators

Compute Library supports the operators listed in the table below.

Compute Library supports a wide range of data types; detailed information can be found directly in the documentation of each kernel/function.
The main data types that the Machine Learning functions support are the following (a short usage sketch follows the list):
  <ul>
    <li>BFLOAT16: 16-bit non-standard brain floating point
    <li>QASYMM8: 8-bit unsigned asymmetric quantized
    <li>QASYMM8_SIGNED: 8-bit signed asymmetric quantized
    <li>QSYMM8_PER_CHANNEL: 8-bit signed symmetric quantized (used for the weights)
    <li>QSYMM8: 8-bit signed symmetric quantized
    <li>QSYMM16: 16-bit signed symmetric quantized
    <li>F32: 32-bit single precision floating point
    <li>F16: 16-bit half precision floating point
    <li>S32: 32-bit signed integer
    <li>U8: 8-bit unsigned char
    <li>All: Agnostic to any specific data type
  </ul>

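As an illustration of how these data types appear in the API, a tensor's element type is carried by its TensorInfo descriptor, and quantized types additionally carry a QuantizationInfo (scale and zero point). Below is a minimal sketch; the shape, scale and zero point are illustrative only:

@code{.cpp}
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

void describe_tensors()
{
    // Descriptor for a 4D tensor holding F32 data (the shape is illustrative).
    TensorInfo fp_info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);

    // Descriptor for a quantized tensor of the same shape: QASYMM8 data with an
    // illustrative quantization scale of 0.05 and zero point (offset) of 128.
    TensorInfo q_info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::QASYMM8);
    q_info.set_quantization_info(QuantizationInfo(0.05f, 128));
}
@endcode
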
Compute Library supports the following data layouts (fast-changing dimension from right to left); a short example follows the list:
  <ul>
    <li>NHWC: The native layout of Compute Library that delivers the best performance, where channels are in the fastest-changing dimension
    <li>NCHW: Legacy layout where width is in the fastest-changing dimension
    <li>NDHWC: New data layout for supporting 3D operators
    <li>All: Agnostic to any specific data layout
  </ul>
where N = batches, C = channels, H = height, W = width and D = depth.

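The data layout is likewise part of the tensor descriptor. As a minimal sketch (the shape and the choice of activation function are illustrative), the first function in the table below, NEActivationLayer, could be configured and run on an F32 tensor in NHWC layout as follows:

@code{.cpp}
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // NHWC tensor with N = 1, H = 32, W = 32, C = 16. TensorShape lists the
    // fastest-changing dimension first, so for NHWC this reads (C, W, H, N).
    TensorInfo info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);
    info.set_data_layout(DataLayout::NHWC);

    Tensor src, dst;
    src.allocator()->init(info);
    dst.allocator()->init(info);

    // Configure a RELU activation, allocate the backing memory, then run.
    NEActivationLayer act;
    act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    src.allocator()->allocate();
    dst.allocator()->allocate();
    act.run();
    return 0;
}
@endcode
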
60<table>
61<caption id="multi_row"></caption>
62<tr>
63  <th>Function
64  <th>Description
65  <th>Equivalent Android NNAPI Op
66  <th>Backends
67  <th>Data Layouts
68  <th>Data Types
69<tr>
70  <td rowspan="2">ActivationLayer
71  <td rowspan="2" style="width:200px;"> Function to simulate an activation layer with the specified activation function.
72  <td rowspan="2">
73      <ul>
74       <li>ANEURALNETWORKS_ELU
75       <li>ANEURALNETWORKS_HARD_SWISH
76       <li>ANEURALNETWORKS_LOGISTIC
77       <li>ANEURALNETWORKS_RELU
78       <li>ANEURALNETWORKS_RELU1
79       <li>ANEURALNETWORKS_RELU6
80       <li>ANEURALNETWORKS_TANH
81      </ul>
82  <td>NEActivationLayer
83  <td>
84      <ul>
85       <li>All
86      </ul>
87  <td>
88    <table>
89    <tr><th>src<th>dst
90    <tr><td>QASYMM8<td>QASYMM8
91    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
92    <tr><td>QSYMM16<td>QSYMM16
93    <tr><td>F16<td>F16
94    <tr><td>F32<td>F32
95    </table>
96<tr>
97  <td>CLActivationLayer
98  <td>
99      <ul>
100       <li>All
101      </ul>
102  <td>
103    <table>
104    <tr><th>src<th>dst
105    <tr><td>QASYMM8<td>QASYMM8
106    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
107    <tr><td>QSYMM16<td>QSYMM16
108    <tr><td>F16<td>F16
109    <tr><td>F32<td>F32
110    </table>
111<tr>
112  <td rowspan="2">ArgMinMaxLayer
113  <td rowspan="2" style="width:200px;"> Function to calculate the index of the minimum or maximum values in a tensor based on an axis.
114  <td rowspan="2">
115      <ul>
116       <li>ANEURALNETWORKS_ARGMAX
117       <li>ANEURALNETWORKS_ARGMIN
118      </ul>
119  <td>NEArgMinMaxLayer
120  <td>
121      <ul>
122       <li>All
123      </ul>
124  <td>
125    <table>
126    <tr><th>src<th>dst
127    <tr><td>QASYMM8<td>U32, S32
128    <tr><td>QASYMM8_SIGNED<td>U32, S32
129    <tr><td>S32<td>U32, S32
130    <tr><td>F16<td>U32, S32
131    <tr><td>F32<td>U32, S32
132    </table>
133<tr>
134  <td>CLArgMinMaxLayer
135  <td>
136      <ul>
137       <li>All
138      </ul>
139  <td>
140    <table>
141    <tr><th>src<th>dst
142    <tr><td>QASYMM8<td>U32, S32
143    <tr><td>QASYMM8_SIGNED<td>U32, S32
144    <tr><td>S32<td>U32, S32
145    <tr><td>F16<td>U32, S32
146    <tr><td>F32<td>U32, S32
147    </table>
148<tr>
149  <td rowspan="1">ArithmeticAddition
150  <td rowspan="1" style="width:200px;"> Function to add 2 tensors.
151  <td rowspan="1">
152      <ul>
153       <li>ANEURALNETWORKS_ADD
154      </ul>
155  <td>NEArithmeticAddition
156  <td>
157      <ul>
158       <li>All
159      </ul>
160  <td>
161    <table>
162    <tr><th>src0<th>src1<th>dst
163    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
164    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
166    <tr><td>QSYMM16<td>QSYMM16<td>S32
167    <tr><td>U8<td>U8<td>U8
168    <tr><td>S16<td>S16<td>S16
169    <tr><td>S32<td>S32<td>S32
170    <tr><td>F16<td>F16<td>F16
171    <tr><td>F32<td>F32<td>F32
172    </table>
173<tr>
174  <td rowspan="1">ArithmeticSubtraction
  <td rowspan="1" style="width:200px;"> Function to subtract 2 tensors.
176  <td rowspan="1">
177      <ul>
178       <li>ANEURALNETWORKS_SUB
179      </ul>
180  <td>NEArithmeticSubtraction
181  <td>
182      <ul>
183       <li>All
184      </ul>
185  <td>
186    <table>
187    <tr><th>src0<th>src1<th>dst
188    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
189    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
191    <tr><td>QSYMM16<td>QSYMM16<td>S32
192    <tr><td>U8<td>U8<td>U8
193    <tr><td>S16<td>S16<td>S16
194    <tr><td>S32<td>S32<td>S32
195    <tr><td>F16<td>F16<td>F16
196    <tr><td>F32<td>F32<td>F32
197    </table>
198<tr>
199  <td rowspan="2">BatchNormalizationLayer
200  <td rowspan="2" style="width:200px;"> Function to perform batch normalization.
201  <td rowspan="2">
202      <ul>
203       <li>n/a
204      </ul>
205  <td>NEBatchNormalizationLayer
206  <td>
207      <ul>
208       <li>NHWC
209       <li>NCHW
210      </ul>
211  <td>
212    <table>
213    <tr><th>src<th>dst
214    <tr><td>F32<td>F32
215    <tr><td>F16<td>F16
216    </table>
217<tr>
218  <td>CLBatchNormalizationLayer
219  <td>
220      <ul>
221       <li>NHWC
222       <li>NCHW
223      </ul>
224  <td>
225    <table>
226    <tr><th>src<th>dst
227    <tr><td>F32<td>F32
228    <tr><td>F16<td>F16
229    </table>
230<tr>
231  <td rowspan="2">BatchToSpaceLayer
232  <td rowspan="2" style="width:200px;"> Batch to space transformation.
233  <td rowspan="2">
234      <ul>
235       <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
236      </ul>
237  <td>NEBatchToSpaceLayer
238  <td>
239      <ul>
240       <li>NHWC
241       <li>NCHW
242      </ul>
243  <td>
244    <table>
245    <tr><th>src0<th>src1<th>dst
    <tr><td>All<td>S32<td>All
247    </table>
248<tr>
249  <td>CLBatchToSpaceLayer
250  <td>
251      <ul>
252       <li>NHWC
253       <li>NCHW
254      </ul>
255  <td>
256    <table>
257    <tr><th>src0<th>src1<th>dst
    <tr><td>All<td>S32<td>All
259    </table>
260<tr>
261  <td rowspan="2">BitwiseAnd
262  <td rowspan="2" style="width:200px;"> Function to perform bitwise AND between 2 tensors.
263  <td rowspan="2">
264      <ul>
265       <li>ANEURALNETWORKS_LOGICAL_AND
266      </ul>
267  <td>NEBitwiseAnd
268  <td>
269      <ul>
270       <li>All
271      </ul>
272  <td>
273    <table>
274    <tr><th>src<th>dst
275    <tr><td>U8<td>U8
276    </table>
277<tr>
278  <td>CLBitwiseAnd
279  <td>
280      <ul>
281       <li>All
282      </ul>
283  <td>
284    <table>
285    <tr><th>src<th>dst
286    <tr><td>U8<td>U8
287    </table>
288<tr>
289  <td rowspan="2">BitwiseNot
290  <td rowspan="2" style="width:200px;"> Function to perform bitwise NOT.
291  <td rowspan="2">
292      <ul>
293       <li>ANEURALNETWORKS_LOGICAL_NOT
294      </ul>
295  <td>NEBitwiseNot
296  <td>
297      <ul>
298       <li>All
299      </ul>
300  <td>
301    <table>
302    <tr><th>src<th>dst
303    <tr><td>U8<td>U8
304    </table>
305<tr>
306  <td>CLBitwiseNot
307  <td>
308      <ul>
309       <li>All
310      </ul>
311  <td>
312    <table>
313    <tr><th>src<th>dst
314    <tr><td>U8<td>U8
315    </table>
316<tr>
317  <td rowspan="2">BitwiseOr
318  <td rowspan="2" style="width:200px;"> Function to perform bitwise OR between 2 tensors.
319  <td rowspan="2">
320      <ul>
321       <li>ANEURALNETWORKS_LOGICAL_OR
322      </ul>
323  <td>NEBitwiseOr
324  <td>
325      <ul>
326       <li>All
327      </ul>
328  <td>
329    <table>
330    <tr><th>src<th>dst
331    <tr><td>U8<td>U8
332    </table>
333<tr>
334  <td>CLBitwiseOr
335  <td>
336      <ul>
337       <li>All
338      </ul>
339  <td>
340    <table>
341    <tr><th>src<th>dst
342    <tr><td>U8<td>U8
343    </table>
344<tr>
345  <td rowspan="2">BitwiseXor
346  <td rowspan="2" style="width:200px;"> Function to perform bitwise XOR between 2 tensors.
347  <td rowspan="2">
348      <ul>
349       <li>n/a
350      </ul>
351  <td>NEBitwiseXor
352  <td>
353      <ul>
354       <li>All
355      </ul>
356  <td>
357    <table>
358    <tr><th>src<th>dst
359    <tr><td>U8<td>U8
360    </table>
361<tr>
362  <td>CLBitwiseXor
363  <td>
364      <ul>
365       <li>All
366      </ul>
367  <td>
368    <table>
369    <tr><th>src<th>dst
370    <tr><td>U8<td>U8
371    </table>
372<tr>
373  <td rowspan="2">BoundingBoxTransform
  <td rowspan="2" style="width:200px;"> Transform proposal bounding boxes to target bounding boxes using bounding box deltas.
375  <td rowspan="2">
376      <ul>
377       <li>n/a
378      </ul>
379  <td>NEBoundingBoxTransform
380  <td>
381      <ul>
382       <li>NHWC
383       <li>NCHW
384      </ul>
385  <td>
386    <table>
387    <tr><th>src0<th>src1<th>dst
388    <tr><td>QASYMM16<td>QASYMM8<td>QASYMM16
389    <tr><td>F16<td>F16<td>F16
390    <tr><td>F32<td>F32<td>F32
391    </table>
392<tr>
393  <td>CLBoundingBoxTransform
394  <td>
395      <ul>
396       <li>NHWC
397       <li>NCHW
398      </ul>
399  <td>
400    <table>
401    <tr><th>src0<th>src1<th>dst
402    <tr><td>QASYMM16<td>QASYMM8<td>QASYMM16
403    <tr><td>F16<td>F16<td>F16
404    <tr><td>F32<td>F32<td>F32
405    </table>
406<tr>
407  <td rowspan="2">Cast
408  <td rowspan="2" style="width:200px;"> Function to cast a tensor.
409  <td rowspan="2">
410      <ul>
411       <li>ANEURALNETWORKS_CAST
412      </ul>
413  <td>NECast
414  <td>
415      <ul>
416       <li>All
417      </ul>
418  <td>
419    <table>
420    <tr><th>src<th>dst
421    <tr><td>QASYMM8_SIGNED<td>S16, S32, F32, F16
422    <tr><td>QASYMM8<td>U16, S16, S32, F32, F16
423    <tr><td>U8<td>U16, S16, S32, F32, F16
424    <tr><td>U16<td>U8, U32
425    <tr><td>S16<td>QASYMM8_SIGNED, U8, S32
426    <tr><td>F16<td>QASYMM8_SIGNED, QASYMM8, F32, S32, U8
427    <tr><td>S32<td>QASYMM8_SIGNED, QASYMM8, F16, F32, U8
428    <tr><td>F32<td>QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
429    </table>
430<tr>
431  <td>CLCast
432  <td>
433      <ul>
434       <li>All
435      </ul>
436  <td>
437    <table>
438    <tr><th>src<th>dst
439    <tr><td>U8<td>S8, U16, S16, U32, S32, F16, F32
440    <tr><td>U16<td>U8, S8, S16, U32, S32, F16, F32
441    <tr><td>S16<td>U8, S8, U16, U32, S32, F16, F32
442    <tr><td>U32<td>U8, S8, U16, S16, S32, F16, F32
443    <tr><td>S32<td>U8, S8, U16, S16, U32, F16, F32
444    <tr><td>F16<td>U8, S8, U16, S16, U32, F32
445    <tr><td>F32<td>U8, S8, U16, S16, U32, F16
446    </table>
447<tr>
448  <td rowspan="2">ChannelShuffleLayer
449  <td rowspan="2" style="width:200px;"> Function to shuffle the channels of the input tensor.
450  <td rowspan="2">
451      <ul>
452       <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
453      </ul>
454  <td>NEChannelShuffleLayer
455  <td>
456      <ul>
457       <li>NCHW
458       <li>NHWC
459      </ul>
460  <td>
461    <table>
462    <tr><th>src<th>dst
463    <tr><td>All<td>All
464    </table>
465<tr>
466  <td>CLChannelShuffleLayer
467  <td>
468      <ul>
469       <li>NCHW
470       <li>NHWC
471      </ul>
472  <td>
473    <table>
474    <tr><th>src<th>dst
475    <tr><td>All<td>All
476    </table>
477<tr>
478  <td rowspan="1">Comparison
479  <td rowspan="1" style="width:200px;"> Function to compare 2 tensors.
480  <td rowspan="1">
481      <ul>
482       <li>ANEURALNETWORKS_EQUAL
483       <li>ANEURALNETWORKS_GREATER
484       <li>ANEURALNETWORKS_GREATER_EQUAL
485       <li>ANEURALNETWORKS_LESS
486       <li>ANEURALNETWORKS_LESS_EQUAL
487       <li>ANEURALNETWORKS_NOT_EQUAL
488      </ul>
489  <td>CLComparison
490  <td>
491      <ul>
492       <li>All
493      </ul>
494  <td>
495    <table>
496    <tr><th>src0<th>src1<th>dst
497    <tr><td>All<td>All<td>U8
498    </table>
499<tr>
500  <td rowspan="2">ConcatenateLayer
501  <td rowspan="2" style="width:200px;"> Function to concatenate tensors along a given axis.
502  <td rowspan="2">
503      <ul>
504       <li>ANEURALNETWORKS_CONCATENATION
505      </ul>
506  <td>NEConcatenateLayer
507  <td>
508      <ul>
509       <li>All
510      </ul>
511  <td>
512    <table>
513    <tr><th>src<th>dst
514    <tr><td>QASYMM8<td>QASYMM8
515    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
516    <tr><td>F16<td>F16
517    <tr><td>F32<td>F32
518    </table>
519<tr>
520  <td>CLConcatenateLayer
521  <td>
522      <ul>
523       <li>All
524      </ul>
525  <td>
526    <table>
527    <tr><th>src<th>dst
528    <tr><td>QASYMM8<td>QASYMM8
529    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
530    <tr><td>F16<td>F16
531    <tr><td>F32<td>F32
532    </table>
533<tr>
534  <td rowspan="2">ConvertFullyConnectedWeights
535  <td rowspan="2" style="width:200px;"> Function to transpose the weights for the fully connected layer.
536  <td rowspan="2">
537      <ul>
538       <li>n/a
539      </ul>
540  <td>NEConvertFullyConnectedWeights
541  <td>
542      <ul>
543       <li>NHWC
544       <li>NCHW
545      </ul>
546  <td>
547    <table>
548    <tr><th>src<th>dst
549    <tr><td>All<td>All
550    </table>
551<tr>
552  <td>CLConvertFullyConnectedWeights
553  <td>
554      <ul>
555       <li>NHWC
556       <li>NCHW
557      </ul>
558  <td>
559    <table>
560    <tr><th>src<th>dst
561    <tr><td>All<td>All
562    </table>
563<tr>
564  <td rowspan="2">ConvolutionLayer
565  <td rowspan="2" style="width:200px;"> Function to compute a convolution layer.
566  <td rowspan="2">
567      <ul>
568       <li>ANEURALNETWORKS_CONV_2D
569      </ul>
570  <td>NEConvolutionLayer
571  <td>
572      <ul>
573       <li>NHWC
574       <li>NCHW
575      </ul>
576  <td>
577    <table>
578    <tr><th>src0<th>src1<th>src2<th>dst
579    <tr><td>F16<td>F16<td>F16<td>F16
580    <tr><td>F32<td>F32<td>F32<td>F32
581    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
582    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
583    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
584    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
585    </table>
586<tr>
587  <td>CLConvolutionLayer
588  <td>
589      <ul>
590       <li>NHWC
591       <li>NCHW
592      </ul>
593  <td>
594    <table>
595    <tr><th>src0<th>src1<th>src2<th>dst
596    <tr><td>F16<td>F16<td>F16<td>F16
597    <tr><td>F32<td>F32<td>F32<td>F32
598    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
599    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
600    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
601    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
602    </table>
603<tr>
604  <td rowspan="2">Conv3D
  <td rowspan="2" style="width:200px;"> Function to compute a 3D convolution layer.
606  <td rowspan="2">
607      <ul>
608       <li>ANEURALNETWORKS_CONV_3D
609      </ul>
610  <td>NEConv3D
611  <td>
612      <ul>
613       <li>NDHWC
614      </ul>
615  <td>
616    <table>
617    <tr><th>src0<th>src1<th>src2<th>dst
618    <tr><td>F16<td>F16<td>F16<td>F16
619    <tr><td>F32<td>F32<td>F32<td>F32
620    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
621    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
622    </table>
623<tr>
624  <td>CLConv3D
625  <td>
626      <ul>
627       <li>NDHWC
628      </ul>
629  <td>
630    <table>
631    <tr><th>src0<th>src1<th>src2<th>dst
632    <tr><td>F16<td>F16<td>F16<td>F16
633    <tr><td>F32<td>F32<td>F32<td>F32
634    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
635    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
636    </table>
637<tr>
638  <td rowspan="2">Copy
639  <td rowspan="2" style="width:200px;"> Function to copy a tensor.
640  <td rowspan="2">
641      <ul>
642       <li>n/a
643      </ul>
644  <td>NECopy
645  <td>
646      <ul>
647       <li>All
648      </ul>
649  <td>
650    <table>
651    <tr><th>src<th>dst
652    <tr><td>All<td>All
653    </table>
654<tr>
655  <td>CLCopy
656  <td>
657      <ul>
658       <li>All
659      </ul>
660  <td>
661    <table>
662    <tr><th>src<th>dst
663    <tr><td>All<td>All
664    </table>
665<tr>
666  <td rowspan="1">Crop
  <td rowspan="1" style="width:200px;"> Performs a copy of the input tensor to the output tensor.
668  <td rowspan="1">
669      <ul>
670       <li>n/a
671      </ul>
672  <td>CLCrop
673  <td>
674      <ul>
675       <li>NHWC
676      </ul>
677  <td>
678    <table>
679    <tr><th>src<th>dst
680    <tr><td>All<td>F32
681    </table>
682<tr>
683  <td rowspan="2">CropResize
684  <td rowspan="2" style="width:200px;"> Function to perform cropping and resizing.
685  <td rowspan="2">
686      <ul>
687       <li>n/a
688      </ul>
689  <td>NECropResize
690  <td>
691      <ul>
692       <li>NHWC
693      </ul>
694  <td>
695    <table>
696    <tr><th>src0<th>src1<th>src2<th>dst
697    <tr><td>All<td>F32<td>F32<td>F32
698    </table>
699<tr>
700  <td>CLCropResize
701  <td>
702      <ul>
703       <li>NHWC
704      </ul>
705  <td>
706    <table>
707    <tr><th>src0<th>src1<th>src2<th>dst
708    <tr><td>All<td>F32<td>F32<td>F32
709    </table>
710<tr>
711  <td rowspan="2">DeconvolutionLayer
712  <td rowspan="2" style="width:200px;"> Function to compute a deconvolution or transpose convolution.
713  <td rowspan="2">
714      <ul>
715       <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
716      </ul>
717  <td>NEDeconvolutionLayer
718  <td>
719      <ul>
720       <li>NHWC
721       <li>NCHW
722      </ul>
723  <td>
724    <table>
725    <tr><th>src0<th>src1<th>src2<th>dst
726    <tr><td>F16<td>F16<td>F16<td>F16
727    <tr><td>F32<td>F32<td>F32<td>F32
728    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
729    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
730    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
731    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
732    </table>
733<tr>
734  <td>CLDeconvolutionLayer
735  <td>
736      <ul>
737       <li>NHWC
738       <li>NCHW
739      </ul>
740  <td>
741    <table>
742    <tr><th>src0<th>src1<th>src2<th>dst
743    <tr><td>F16<td>F16<td>F16<td>F16
744    <tr><td>F32<td>F32<td>F32<td>F32
745    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
746    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
747    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
748    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
749    </table>
750<tr>
751  <td rowspan="1">DeconvolutionLayerUpsample
752  <td rowspan="1" style="width:200px;"> Function to execute deconvolution upsample on OpenCL.
753  <td rowspan="1">
754      <ul>
755       <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
756      </ul>
757  <td>CLDeconvolutionLayerUpsample
758  <td>
759      <ul>
760       <li>NHWC
761       <li>NCHW
762      </ul>
763  <td>
764    <table>
765    <tr><th>src<th>dst
766    <tr><td>All<td>All
767    </table>
768<tr>
769  <td rowspan="2">DepthConvertLayer
770  <td rowspan="2" style="width:200px;"> Performs a down-scaling depth conversion.
771  <td rowspan="2">
772      <ul>
773       <li>n/a
774      </ul>
775  <td>NEDepthConvertLayer
776  <td>
777      <ul>
778       <li>All
779      </ul>
780  <td>
781    <table>
782    <tr><th>src<th>dst
783    <tr><td>QASYMM8<td>F16, F32
784    <tr><td>U8<td>U16, S16, S32
785    <tr><td>U16<td>U8, U32
786    <tr><td>S16<td>U8, S32
787    <tr><td>BFLOAT16<td>F32
788    <tr><td>F16<td>QASYMM8, F32
789    <tr><td>F32<td>QASYMM8, F16, BFLOAT16
790    </table>
791<tr>
792  <td>CLDepthConvertLayer
793  <td>
794      <ul>
795       <li>All
796      </ul>
797  <td>
798    <table>
799    <tr><th>src<th>dst
800    <tr><td>U8<td>S8, U16, S16, U32, S32, F16, F32
801    <tr><td>U16<td>U8, S8, S16, U32, S32, F16, F32
802    <tr><td>S16<td>U8, S8, U16, U32, S32, F16, F32
803    <tr><td>U32<td>U8, S8, U16, S16, S32, F16, F32
804    <tr><td>S32<td>U8, S8, U16, S16, U32, F16, F32
805    <tr><td>F16<td>U8, S8, U16, S16, U32, F32
806    <tr><td>F32<td>U8, S8, U16, S16, U32, F16
807    </table>
808<tr>
809  <td rowspan="2">DepthToSpaceLayer
810  <td rowspan="2" style="width:200px;"> Depth to Space transformation.
811  <td rowspan="2">
812      <ul>
813       <li>ANEURALNETWORKS_DEPTH_TO_SPACE
814      </ul>
815  <td>NEDepthToSpaceLayer
816  <td>
817      <ul>
818       <li>NHWC
819       <li>NCHW
820      </ul>
821  <td>
822    <table>
823    <tr><th>src<th>dst
824    <tr><td>All<td>All
825    </table>
826<tr>
827  <td>CLDepthToSpaceLayer
828  <td>
829      <ul>
830       <li>NHWC
831       <li>NCHW
832      </ul>
833  <td>
834    <table>
835    <tr><th>src<th>dst
836    <tr><td>All<td>All
837    </table>
838<tr>
839  <td rowspan="2">DepthwiseConvolutionLayer
840  <td rowspan="2" style="width:200px;"> Function to perform depthwise separable convolution.
841  <td rowspan="2">
842      <ul>
843       <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
844      </ul>
845  <td>NEDepthwiseConvolutionLayer
846  <td>
847      <ul>
848       <li>NHWC
849       <li>NCHW
850      </ul>
851  <td>
852    <table>
853    <tr><th>src0<th>src1<th>src2<th>dst
854    <tr><td>F16<td>F16<td>F16<td>F16
855    <tr><td>F32<td>F32<td>F32<td>F32
856    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
857    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
858    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
859    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
860    </table>
861<tr>
862  <td>CLDepthwiseConvolutionLayer
863  <td>
864      <ul>
865       <li>NHWC
866       <li>NCHW
867      </ul>
868  <td>
869    <table>
870    <tr><th>src0<th>src1<th>src2<th>dst
871    <tr><td>F16<td>F16<td>F16<td>F16
872    <tr><td>F32<td>F32<td>F32<td>F32
873    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
874    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
875    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
876    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
877    </table>
878<tr>
879  <td rowspan="2">DequantizationLayer
880  <td rowspan="2" style="width:200px;"> Function to dequantize the values in a tensor.
881  <td rowspan="2">
882      <ul>
883       <li>ANEURALNETWORKS_DEQUANTIZE
884      </ul>
885  <td>NEDequantizationLayer
886  <td>
887      <ul>
888       <li>All
889      </ul>
890  <td>
891    <table>
892    <tr><th>src<th>dst
893    <tr><td>QASYMM8<td>F16, F32
894    <tr><td>QASYMM8_SIGNED<td>F16, F32
895    <tr><td>QSYMM8_PER_CHANNEL<td>F16, F32
896    <tr><td>QSYMM8<td>F16, F32
897    <tr><td>QSYMM16<td>F16, F32
898    </table>
899<tr>
900  <td>CLDequantizationLayer
901  <td>
902      <ul>
903       <li>All
904      </ul>
905  <td>
906    <table>
907    <tr><th>src<th>dst
908    <tr><td>QASYMM8<td>F16, F32
909    <tr><td>QASYMM8_SIGNED<td>F16, F32
910    <tr><td>QSYMM8_PER_CHANNEL<td>F16, F32
911    <tr><td>QSYMM8<td>F16, F32
912    <tr><td>QSYMM16<td>F16, F32
913    </table>
914<tr>
915  <td rowspan="1">DetectionPostProcessLayer
  <td rowspan="1" style="width:200px;"> Function to generate the detection output based on center-size encoded boxes, class predictions and anchors by applying non-maximum suppression (NMS).
917  <td rowspan="1">
918      <ul>
919       <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
920      </ul>
921  <td>NEDetectionPostProcessLayer
922  <td>
923      <ul>
924       <li>All
925      </ul>
926  <td>
927    <table>
928    <tr><th>src0 - src2<th>dst0 - dst3
929    <tr><td>QASYMM8<td>F32
930    <tr><td>QASYMM8_SIGNED<td>F32
931    <tr><td>F32<td>F32
932    </table>
933<tr>
934  <td rowspan="2">DirectConvolutionLayer
935  <td rowspan="2" style="width:200px;"> Function to compute direct convolution.
936  <td rowspan="2">
937      <ul>
938       <li>ANEURALNETWORKS_CONV_2D
939      </ul>
940  <td>NEDirectConvolutionLayer
941  <td>
942      <ul>
943       <li>NHWC
944       <li>NCHW
945      </ul>
946  <td>
947    <table>
948    <tr><th>src0<th>src1<th>src2<th>dst
949    <tr><td>F16<td>F16<td>F16<td>F16
950    <tr><td>F32<td>F32<td>F32<td>F32
951    </table>
952<tr>
953  <td>CLDirectConvolutionLayer
954  <td>
955      <ul>
956       <li>NHWC
957       <li>NCHW
958      </ul>
959  <td>
960    <table>
961    <tr><th>src0<th>src1<th>src2<th>dst
962    <tr><td>F16<td>F16<td>F16<td>F16
963    <tr><td>F32<td>F32<td>F32<td>F32
964    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
965    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
966    </table>
967<tr>
968  <td rowspan="1">DirectDeconvolutionLayer
969  <td rowspan="1" style="width:200px;"> Function to run the deconvolution layer.
970  <td rowspan="1">
971      <ul>
972       <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
973      </ul>
974  <td>CLDirectDeconvolutionLayer
975  <td>
976      <ul>
977       <li>NHWC
978       <li>NCHW
979      </ul>
980  <td>
981    <table>
982    <tr><th>src0<th>src1<th>src2<th>dst
983    <tr><td>F16<td>F16<td>F16<td>F16
984    <tr><td>F32<td>F32<td>F32<td>F32
985    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
986    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
987    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
988    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
989    </table>
990<tr>
991  <td rowspan="13">ElementwiseOperations
  <td rowspan="13" style="width:200px;"> Function to perform, in Cpu: Div, Max, Min, Pow, SquaredDiff and Comparisons (Equal, greater, greater_equal, less, less_equal, not_equal); in CL: Add, Sub, Div, Max, Min, Pow and SquaredDiff.
993  <td rowspan="13">
994      <ul>
995       <li>ANEURALNETWORKS_MAXIMUM
996       <li>ANEURALNETWORKS_MINIMUM
997       <li>ANEURALNETWORKS_POW
998       <li>ANEURALNETWORKS_DIV
999       <li>ANEURALNETWORKS_ADD
1000       <li>ANEURALNETWORKS_SUB
1001       <li>ANEURALNETWORKS_EQUAL
1002       <li>ANEURALNETWORKS_GREATER
1003       <li>ANEURALNETWORKS_GREATER_EQUAL
1004       <li>ANEURALNETWORKS_LESS
1005       <li>ANEURALNETWORKS_LESS_EQUAL
1006       <li>ANEURALNETWORKS_NOT_EQUAL
1007      </ul>
1008  <td>NEElementwiseMax
1009  <td>
1010      <ul>
1011       <li>All
1012      </ul>
1013  <td>
1014    <table>
1015    <tr><th>src0<th>src1<th>dst
1016    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1017    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1018    <tr><td>S32<td>S32<td>S32
1019    <tr><td>S16<td>S16<td>S16
1020    <tr><td>F16<td>F16<td>F16
1021    <tr><td>F32<td>F32<td>F32
1022    </table>
1023<tr>
1024  <td>NEElementwiseMin
1025  <td>
1026      <ul>
1027       <li>All
1028      </ul>
1029  <td>
1030    <table>
1031    <tr><th>src0<th>src1<th>dst
1032    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1033    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1034    <tr><td>S32<td>S32<td>S32
1035    <tr><td>S16<td>S16<td>S16
1036    <tr><td>F16<td>F16<td>F16
1037    <tr><td>F32<td>F32<td>F32
1038    </table>
1039<tr>
1040  <td>NEElementwiseSquaredDiff
1041  <td>
1042      <ul>
1043       <li>All
1044      </ul>
1045  <td>
1046    <table>
1047    <tr><th>src0<th>src1<th>dst
1048    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1049    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1050    <tr><td>S32<td>S32<td>S32
1051    <tr><td>S16<td>S16<td>S16
1052    <tr><td>F16<td>F16<td>F16
1053    <tr><td>F32<td>F32<td>F32
1054    </table>
1055<tr>
1056  <td>NEElementwiseDivision
1057  <td>
1058      <ul>
1059       <li>All
1060      </ul>
1061  <td>
1062    <table>
1063    <tr><th>src0<th>src1<th>dst
1064    <tr><td>F16<td>F16<td>F16
1065    <tr><td>F32<td>F32<td>F32
1066    </table>
1067<tr>
1068  <td>NEElementwisePower
1069  <td>
1070      <ul>
1071       <li>All
1072      </ul>
1073  <td>
1074    <table>
1075    <tr><th>src0<th>src1<th>dst
1076    <tr><td>F16<td>F16<td>F16
1077    <tr><td>F32<td>F32<td>F32
1078    </table>
1079<tr>
1080  <td>NEElementwiseComparison
1081  <td>
1082      <ul>
1083       <li>All
1084      </ul>
1085  <td>
1086    <table>
1087    <tr><th>src0<th>src1<th>dst
1088    <tr><td>QASYMM8<td>QASYMM8<td>U8
1089    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>U8
1090    <tr><td>S32<td>S32<td>U8
1091    <tr><td>U8<td>U8<td>U8
1092    <tr><td>S16<td>S16<td>U8
1093    <tr><td>F16<td>F16<td>U8
1094    <tr><td>F32<td>F32<td>U8
1095    </table>
1096<tr>
1097  <td>CLArithmeticAddition
1098  <td>
1099      <ul>
1100       <li>All
1101      </ul>
1102  <td>
1103    <table>
1104    <tr><th>src0<th>src1<th>dst
1105    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1106    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1108    <tr><td>U8<td>U8<td>U8
1109    <tr><td>U8<td>U8<td>S16
1110    <tr><td>U8<td>S16<td>S16
1111    <tr><td>S16<td>U8<td>S16
1112    <tr><td>S16<td>S16<td>S16
1113    <tr><td>S32<td>S32<td>S32
1114    <tr><td>F16<td>F16<td>F16
1115    <tr><td>F32<td>F32<td>F32
1116    </table>
1117<tr>
1118  <td>CLArithmeticSubtraction
1119  <td>
1120      <ul>
1121       <li>All
1122      </ul>
1123  <td>
1124    <table>
1125    <tr><th>src0<th>src1<th>dst
1126    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1127    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1129    <tr><td>U8<td>U8<td>U8
1130    <tr><td>U8<td>U8<td>S16
1131    <tr><td>U8<td>S16<td>S16
1132    <tr><td>S16<td>U8<td>S16
1133    <tr><td>S16<td>S16<td>S16
1134    <tr><td>S32<td>S32<td>S32
1135    <tr><td>F16<td>F16<td>F16
1136    <tr><td>F32<td>F32<td>F32
1137    </table>
1138<tr>
1139  <td>CLArithmeticDivision
1140  <td>
1141      <ul>
1142       <li>All
1143      </ul>
1144  <td>
1145    <table>
1146    <tr><th>src0<th>src1<th>dst
1147    <tr><td>F16<td>F16<td>F16
1148    <tr><td>F32<td>F32<td>F32
1149    </table>
1150<tr>
1151  <td>CLElementwiseMax
1152  <td>
1153      <ul>
1154       <li>All
1155      </ul>
1156  <td>
1157    <table>
1158    <tr><th>src0<th>src1<th>dst
1159    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1160    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1162    <tr><td>U8<td>U8<td>U8
1163    <tr><td>S16<td>S16<td>S16
1164    <tr><td>S32<td>S32<td>S32
1165    <tr><td>U32<td>U32<td>U32
1166    <tr><td>F16<td>F16<td>F16
1167    <tr><td>F32<td>F32<td>F32
1168    </table>
1169<tr>
1170  <td>CLElementwiseMin
1171  <td>
1172      <ul>
1173       <li>All
1174      </ul>
1175  <td>
1176    <table>
1177    <tr><th>src0<th>src1<th>dst
1178    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1179    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1181    <tr><td>U8<td>U8<td>U8
1182    <tr><td>S16<td>S16<td>S16
1183    <tr><td>S32<td>S32<td>S32
1184    <tr><td>U32<td>U32<td>U32
1185    <tr><td>F16<td>F16<td>F16
1186    <tr><td>F32<td>F32<td>F32
1187    </table>
1188<tr>
1189  <td>CLElementwiseSquaredDiff
1190  <td>
1191      <ul>
1192       <li>All
1193      </ul>
1194  <td>
1195    <table>
1196    <tr><th>src0<th>src1<th>dst
1197    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1198    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1200    <tr><td>U8<td>U8<td>U8
1201    <tr><td>S16<td>S16<td>S16
1202    <tr><td>F16<td>F16<td>F16
1203    <tr><td>F32<td>F32<td>F32
1204    </table>
1205<tr>
1206  <td>CLElementwisePower
1207  <td>
1208      <ul>
1209       <li>All
1210      </ul>
1211  <td>
1212    <table>
1213    <tr><th>src0<th>src1<th>dst
1214    <tr><td>F16<td>F16<td>F16
1215    <tr><td>F32<td>F32<td>F32
1216    </table>
1217<tr>
1218  <td rowspan="8">ElementwiseUnaryLayer
  <td rowspan="8" style="width:200px;"> Function to perform: Rsqrt, Exp, Neg, Log, Abs, Round and Sin.
1220  <td rowspan="8">
1221      <ul>
1222       <li>ANEURALNETWORKS_ABS
1223       <li>ANEURALNETWORKS_EXP
1224       <li>ANEURALNETWORKS_LOG
1225       <li>ANEURALNETWORKS_NEG
1226       <li>ANEURALNETWORKS_RSQRT
1227       <li>ANEURALNETWORKS_SIN
1228      </ul>
1229  <td>NEElementwiseUnaryLayer
1230  <td>
1231      <ul>
1232       <li>All
1233      </ul>
1234  <td>
1235    <table>
1236    <tr><th>src<th>dst
1237    <tr><td>F16<td>F16
1238    <tr><td>F32<td>F32
1239    <tr><td>S32<td>S32
1240    </table>
1241<tr>
1242  <td>CLRsqrtLayer
1243  <td>
1244      <ul>
1245       <li>All
1246      </ul>
1247  <td>
1248    <table>
1249    <tr><th>src<th>dst
1250    <tr><td>F16<td>F16
1251    <tr><td>F32<td>F32
1252    </table>
1253<tr>
1254  <td>CLExpLayer
1255  <td>
1256      <ul>
1257       <li>All
1258      </ul>
1259  <td>
1260    <table>
1261    <tr><th>src<th>dst
1262    <tr><td>F16<td>F16
1263    <tr><td>F32<td>F32
1264    </table>
1265<tr>
1266  <td>CLNegLayer
1267  <td>
1268      <ul>
1269       <li>All
1270      </ul>
1271  <td>
1272    <table>
1273    <tr><th>src<th>dst
1274    <tr><td>F16<td>F16
1275    <tr><td>F32<td>F32
1276    <tr><td>S32<td>S32
1277    </table>
1278<tr>
1279  <td>CLSinLayer
1280  <td>
1281      <ul>
1282       <li>All
1283      </ul>
1284  <td>
1285    <table>
1286    <tr><th>src<th>dst
1287    <tr><td>F16<td>F16
1288    <tr><td>F32<td>F32
1289    </table>
1290<tr>
1291  <td>CLLogLayer
1292  <td>
1293      <ul>
1294       <li>All
1295      </ul>
1296  <td>
1297    <table>
1298    <tr><th>src<th>dst
1299    <tr><td>F16<td>F16
1300    <tr><td>F32<td>F32
1301    </table>
1302<tr>
1303  <td>CLAbsLayer
1304  <td>
1305      <ul>
1306       <li>All
1307      </ul>
1308  <td>
1309    <table>
1310    <tr><th>src<th>dst
1311    <tr><td>F16<td>F16
1312    <tr><td>F32<td>F32
1313    </table>
1314<tr>
1315  <td>CLRoundLayer
1316  <td>
1317      <ul>
1318       <li>All
1319      </ul>
1320  <td>
1321    <table>
1322    <tr><th>src<th>dst
1323    <tr><td>F16<td>F16
1324    <tr><td>F32<td>F32
1325    </table>
1326<tr>
1327  <td rowspan="2">FFT1D
1328  <td rowspan="2" style="width:200px;"> Fast Fourier Transform 1D.
1329  <td rowspan="2">
1330      <ul>
1331       <li>n/a
1332      </ul>
1333  <td>NEFFT1D
1334  <td>
1335      <ul>
1336       <li>All
1337      </ul>
1338  <td>
1339    <table>
1340    <tr><th>src<th>dst
1341    <tr><td>F32<td>F32
1342    </table>
1343<tr>
1344  <td>CLFFT1D
1345  <td>
1346      <ul>
1347       <li>All
1348      </ul>
1349  <td>
1350    <table>
1351    <tr><th>src<th>dst
1352    <tr><td>F32<td>F32
1353    <tr><td>F16<td>F16
1354    </table>
1355<tr>
1356  <td rowspan="2">FFT2D
1357  <td rowspan="2" style="width:200px;"> Fast Fourier Transform 2D.
1358  <td rowspan="2">
1359      <ul>
1360       <li>n/a
1361      </ul>
1362  <td>NEFFT2D
1363  <td>
1364      <ul>
1365       <li>All
1366      </ul>
1367  <td>
1368    <table>
1369    <tr><th>src<th>dst
1370    <tr><td>F32<td>F32
1371    </table>
1372<tr>
1373  <td>CLFFT2D
1374  <td>
1375      <ul>
1376       <li>All
1377      </ul>
1378  <td>
1379    <table>
1380    <tr><th>src<th>dst
1381    <tr><td>F32<td>F32
1382    <tr><td>F16<td>F16
1383    </table>
1384<tr>
1385  <td rowspan="2">FFTConvolutionLayer
1386  <td rowspan="2" style="width:200px;"> Fast Fourier Transform Convolution.
1387  <td rowspan="2">
1388      <ul>
1389       <li>ANEURALNETWORKS_CONV_2D
1390      </ul>
1391  <td>NEFFTConvolutionLayer
1392  <td>
1393      <ul>
1394       <li>All
1395      </ul>
1396  <td>
1397    <table>
1398    <tr><th>src<th>dst
1399    <tr><td>F32<td>F32
1400    </table>
1401<tr>
1402  <td>CLFFTConvolutionLayer
1403  <td>
1404      <ul>
1405       <li>All
1406      </ul>
1407  <td>
1408    <table>
1409    <tr><th>src<th>dst
1410    <tr><td>F32<td>F32
1411    <tr><td>F16<td>F16
1412    </table>
1413<tr>
1414  <td rowspan="2">Fill
  <td rowspan="2" style="width:200px;"> Function to fill a tensor with a given value.
1416  <td rowspan="2">
1417      <ul>
1418       <li>ANEURALNETWORKS_FILL
1419      </ul>
1420  <td>NEFill
1421  <td>
1422      <ul>
1423       <li>All
1424      </ul>
1425  <td>
1426    <table>
1427    <tr><th>src<th>dst
1428    <tr><td>All<td>All
1429    </table>
1430<tr>
1431  <td>CLFill
1432  <td>
1433      <ul>
1434       <li>All
1435      </ul>
1436  <td>
1437    <table>
1438    <tr><th>src<th>dst
1439    <tr><td>All<td>All
1440    </table>
1441<tr>
1442  <td rowspan="1">FillBorder
1443  <td rowspan="1" style="width:200px;"> Function to fill the borders within the XY-planes.
1444  <td rowspan="1">
1445      <ul>
1446       <li>n/a
1447      </ul>
1448  <td>NEFillBorder
1449  <td>
1450      <ul>
1451       <li>All
1452      </ul>
1453  <td>
1454    <table>
1455    <tr><th>src<th>dst
1456    <tr><td>All<td>All
1457    </table>
1458<tr>
1459  <td rowspan="2">FlattenLayer
  <td rowspan="2" style="width:200px;"> Function to reshape a tensor to be 1D.
1461  <td rowspan="2">
1462      <ul>
1463       <li>ANEURALNETWORKS_RESHAPE
1464      </ul>
1465  <td>NEFlattenLayer
1466  <td>
1467      <ul>
1468       <li>All
1469      </ul>
1470  <td>
1471    <table>
1472    <tr><th>src<th>dst
1473    <tr><td>All<td>All
1474    </table>
1475<tr>
1476  <td>CLFlattenLayer
1477  <td>
1478      <ul>
1479       <li>All
1480      </ul>
1481  <td>
1482    <table>
1483    <tr><th>src<th>dst
1484    <tr><td>All<td>All
1485    </table>
1486<tr>
1487  <td rowspan="2">Floor
  <td rowspan="2" style="width:200px;"> Round each value down to the nearest integer.
1489  <td rowspan="2">
1490      <ul>
1491       <li>ANEURALNETWORKS_FLOOR
1492      </ul>
1493  <td>NEFloor
1494  <td>
1495      <ul>
1496       <li>All
1497      </ul>
1498  <td>
1499    <table>
1500    <tr><th>src<th>dst
1501    <tr><td>F32<td>F32
1502    <tr><td>F16<td>F16
1503    </table>
1504<tr>
1505  <td>CLFloor
1506  <td>
1507      <ul>
1508       <li>All
1509      </ul>
1510  <td>
1511    <table>
1512    <tr><th>src<th>dst
1513    <tr><td>F32<td>F32
1514    <tr><td>F16<td>F16
1515    </table>
1516<tr>
1517  <td rowspan="2">FullyConnectedLayer
1518  <td rowspan="2" style="width:200px;"> Function to perform a fully connected / dense layer.
1519  <td rowspan="2">
1520      <ul>
1521       <li>ANEURALNETWORKS_FULLY_CONNECTED
1522      </ul>
1523  <td>NEFullyConnectedLayer
1524  <td>
1525      <ul>
1526       <li>NHWC
1527       <li>NCHW
1528      </ul>
1529  <td>
1530    <table>
1531    <tr><th>src0<th>src1<th>src2<th>dst
1532    <tr><td>F16<td>F16<td>F16<td>F16
1533    <tr><td>F32<td>F32<td>F32<td>F32
1534    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1535    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1536    </table>
1537<tr>
1538  <td>CLFullyConnectedLayer
1539  <td>
1540      <ul>
1541       <li>NHWC
1542       <li>NCHW
1543      </ul>
1544  <td>
1545    <table>
1546    <tr><th>src0<th>src1<th>src2<th>dst
1547    <tr><td>F16<td>F16<td>F16<td>F16
1548    <tr><td>F32<td>F32<td>F32<td>F32
1549    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1550    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1551    </table>
1552<tr>
1553  <td rowspan="2">FuseBatchNormalization
1554  <td rowspan="2" style="width:200px;"> Function to fuse the batch normalization node to a preceding convolution node.
1555  <td rowspan="2">
1556      <ul>
1557       <li>n/a
1558      </ul>
1559  <td>NEFuseBatchNormalization
1560  <td>
1561      <ul>
1562       <li>NHWC
1563       <li>NCHW
1564      </ul>
1565  <td>
1566    <table>
1567    <tr><th>src<th>dst
1568    <tr><td>F32<td>F32
1569    <tr><td>F16<td>F16
1570    </table>
1571<tr>
1572  <td>CLFuseBatchNormalization
1573  <td>
1574      <ul>
1575       <li>NHWC
1576       <li>NCHW
1577      </ul>
1578  <td>
1579    <table>
1580    <tr><th>src<th>dst
1581    <tr><td>F32<td>F32
1582    <tr><td>F16<td>F16
1583    </table>
1584<tr>
1585  <td rowspan="2">Gather
1586  <td rowspan="2" style="width:200px;"> Performs the Gather operation along the chosen axis.
1587  <td rowspan="2">
1588      <ul>
1589       <li>ANEURALNETWORKS_GATHER
1590      </ul>
1591  <td>NEGather
1592  <td>
1593      <ul>
1594       <li>All
1595      </ul>
1596  <td>
1597    <table>
1598    <tr><th>src<th>dst
1599    <tr><td>All<td>All
1600    </table>
1601<tr>
1602  <td>CLGather
1603  <td>
1604      <ul>
1605       <li>All
1606      </ul>
1607  <td>
1608    <table>
1609    <tr><th>src<th>dst
1610    <tr><td>All<td>All
1611    </table>
1612<tr>
1613  <td rowspan="2">GEMM
1614  <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1615  <td rowspan="2">
1616      <ul>
1617       <li>n/a
1618      </ul>
1619  <td>NEGEMM
1620  <td>
1621      <ul>
1622       <li>All
1623      </ul>
1624  <td>
1625    <table>
1626    <tr><th>src0<th>src1<th>src2<th>dst
1627    <tr><td>F32<td>F32<td>F32<td>F32
1628    <tr><td>F16<td>F16<td>F16<td>F16
1629    <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1630    </table>
1631<tr>
1632  <td>CLGEMM
1633  <td>
1634      <ul>
1635       <li>All
1636      </ul>
1637  <td>
1638    <table>
1639    <tr><th>src0<th>src1<th>src2<th>dst
1640    <tr><td>F32<td>F32<td>F32<td>F32
1641    <tr><td>F16<td>F16<td>F16<td>F16
1642    </table>
1643<tr>
1644  <td rowspan="1">GEMMConv2d
1645  <td rowspan="1" style="width:200px;"> General Matrix Multiplication.
1646  <td rowspan="1">
1647      <ul>
1648       <li>ANEURALNETWORKS_CONV_2D
1649      </ul>
1650  <td>NEGEMMConv2d
1651  <td>
1652      <ul>
1653       <li>All
1654      </ul>
1655  <td>
1656    <table>
1657    <tr><th>src0<th>src1<th>src2<th>dst
1658    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1659    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1660    <tr><td>F16<td>F16<td>F16<td>F16
1661    <tr><td>F32<td>F32<td>F32<td>F32
1662    <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1663    </table>
1664<tr>
1665  <td rowspan="2">GEMMConvolutionLayer
1666  <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1667  <td rowspan="2">
1668      <ul>
1669       <li>ANEURALNETWORKS_CONV_2D
1670      </ul>
1671  <td>NEGEMMConvolutionLayer
1672  <td>
1673      <ul>
1674       <li>NHWC
1675       <li>NCHW
1676      </ul>
1677  <td>
1678    <table>
1679    <tr><th>src0<th>src1<th>src2<th>dst
1680    <tr><td>F16<td>F16<td>F16<td>F16
1681    <tr><td>F32<td>F32<td>F32<td>F32
1682    <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1683    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1684    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1685    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1686    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1687    </table>
1688<tr>
1689  <td>CLGEMMConvolutionLayer
1690  <td>
1691      <ul>
1692       <li>NHWC
1693       <li>NCHW
1694      </ul>
1695  <td>
1696    <table>
1697    <tr><th>src0<th>src1<th>src2<th>dst
1698    <tr><td>F16<td>F16<td>F16<td>F16
1699    <tr><td>F32<td>F32<td>F32<td>F32
1700    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1701    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1702    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1703    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1704    </table>
1705<tr>
1706  <td rowspan="1">GEMMDeconvolutionLayer
1707  <td rowspan="1" style="width:200px;"> General Matrix Multiplication.
1708  <td rowspan="1">
1709      <ul>
1710       <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
1711      </ul>
1712  <td>CLGEMMDeconvolutionLayer
1713  <td>
1714      <ul>
1715       <li>NHWC
1716      </ul>
1717  <td>
1718    <table>
1719    <tr><th>src0<th>src1<th>src2<th>dst
1720    <tr><td>F16<td>F16<td>F16<td>F16
1721    <tr><td>F32<td>F32<td>F32<td>F32
1722    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1723    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1724    </table>
1725<tr>
1726  <td rowspan="2">GEMMLowpMatrixMultiplyCore
1727  <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1728  <td rowspan="2">
1729      <ul>
1730       <li>n/a
1731      </ul>
1732  <td>NEGEMMLowpMatrixMultiplyCore
1733  <td>
1734      <ul>
1735       <li>NHWC
1736       <li>NCHW
1737      </ul>
1738  <td>
1739    <table>
1740    <tr><th>src0<th>src1<th>src2<th>dst
1741    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1742    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1743    <tr><td>QASYMM8<td>QSYMM8<td>S32<td>QASYMM8
1744    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>S32
1745    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1746    <tr><td>QASYMM8<td>QSYMM8<td>S32<td>S32
1747    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1748    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1749    <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>QASYMM8_SIGNED
1750    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>S32
1751    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1752    <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>S32
1753    </table>
1754<tr>
1755  <td>CLGEMMLowpMatrixMultiplyCore
1756  <td>
1757      <ul>
1758       <li>NHWC
1759       <li>NCHW
1760      </ul>
1761  <td>
1762    <table>
1763    <tr><th>src0<th>src1<th>src2<th>dst
1764    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1765    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1766    <tr><td>QASYMM8<td>QSYMM8<td>S32<td>QASYMM8
1767    <tr><td>QASYMM8<td>QASYMM8<td>S32<td>S32
1768    <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1769    <tr><td>QASYMM8<td>QSYMM8<td>S32<td>S32
1770    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1771    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1772    <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>QASYMM8_SIGNED
1773    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>S32
1774    <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1775    <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>S32
1776    </table>
1777<tr>
1778  <td rowspan="2">GEMMLowpOutputStage
1779  <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1780  <td rowspan="2">
1781      <ul>
1782       <li>n/a
1783      </ul>
1784  <td>NEGEMMLowpOutputStage
1785  <td>
1786      <ul>
1787       <li>All
1788      </ul>
1789  <td>
1790    <table>
1791    <tr><th>src0<th>src1<th>dst
1792    <tr><td>S32<td>S32<td>QASYMM8
1793    <tr><td>S32<td>S32<td>QASYMM8_SIGNED
1794    <tr><td>S32<td>S32<td>QSYMM16
1795    </table>
1796<tr>
1797  <td>CLGEMMLowpOutputStage
1798  <td>
1799      <ul>
1800       <li>All
1801      </ul>
1802  <td>
1803    <table>
1804    <tr><th>src0<th>src1<th>dst
1805    <tr><td>S32<td>S32<td>QASYMM8
1806    <tr><td>S32<td>S32<td>QASYMM8_SIGNED
1807    <tr><td>S32<td>S32<td>QSYMM16
1808    </table>
1809<tr>
1810  <td rowspan="2">GenerateProposalsLayer
  <td rowspan="2" style="width:200px;"> Function to generate proposals for an RPN (Region Proposal Network).
1812  <td rowspan="2">
1813      <ul>
1814       <li>ANEURALNETWORKS_GENERATE_PROPOSALS
1815      </ul>
1816  <td>NEGenerateProposalsLayer
1817  <td>
1818      <ul>
1819       <li>All
1820      </ul>
1821  <td>
1822    <table>
1823    <tr><th>src0<th>src1<th>src2<th>dst
1824    <tr><td>F16<td>F16<td>F16<td>F16
1825    <tr><td>F32<td>F32<td>F32<td>F32
1826    <tr><td>QASYMM8<td>QSYMM8<td>QSYMM16<td>QASYMM8
1827    </table>
1828<tr>
1829  <td>CLGenerateProposalsLayer
1830  <td>
1831      <ul>
1832       <li>All
1833      </ul>
1834  <td>
1835    <table>
1836    <tr><th>src0<th>src1<th>src2<th>dst
1837    <tr><td>F16<td>F16<td>F16<td>F16
1838    <tr><td>F32<td>F32<td>F32<td>F32
1839    <tr><td>QASYMM8<td>QSYMM8<td>QSYMM16<td>QASYMM8
1840    </table>
1841<tr>
1842  <td rowspan="2">InstanceNormalizationLayer
  <td rowspan="2" style="width:200px;"> Function to perform an instance normalization on a given axis.
1844  <td rowspan="2">
1845      <ul>
1846       <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1847      </ul>
1848  <td>NEInstanceNormalizationLayer
1849  <td>
1850      <ul>
1851       <li>NHWC
1852       <li>NCHW
1853      </ul>
1854  <td>
1855    <table>
1856    <tr><th>src<th>dst
1857    <tr><td>F16<td>F16
1858    <tr><td>F32<td>F32
1859    </table>
1860<tr>
1861  <td>CLInstanceNormalizationLayer
1862  <td>
1863      <ul>
1864       <li>NHWC
1865       <li>NCHW
1866      </ul>
1867  <td>
1868    <table>
1869    <tr><th>src<th>dst
1870    <tr><td>F16<td>F16
1871    <tr><td>F32<td>F32
1872    </table>
1873<tr>
1874  <td rowspan="2">L2NormalizeLayer
  <td rowspan="2" style="width:200px;"> Function to perform an L2 normalization on a given axis.
1876  <td rowspan="2">
1877      <ul>
1878       <li>ANEURALNETWORKS_L2_NORMALIZATION
1879      </ul>
1880  <td>NEL2NormalizeLayer
1881  <td>
1882      <ul>
1883       <li>NHWC
1884       <li>NCHW
1885      </ul>
1886  <td>
1887    <table>
1888    <tr><th>src<th>dst
1889    <tr><td>F16<td>F16
1890    <tr><td>F32<td>F32
1891    </table>
1892<tr>
1893  <td>CLL2NormalizeLayer
1894  <td>
1895      <ul>
1896       <li>NHWC
1897       <li>NCHW
1898      </ul>
1899  <td>
1900    <table>
1901    <tr><th>src<th>dst
1902    <tr><td>F16<td>F16
1903    <tr><td>F32<td>F32
1904    </table>
1905<tr>
1906  <td rowspan="3">Logical
  <td rowspan="3" style="width:200px;"> Function to perform: Logical AND, Logical OR and Logical NOT.
1908  <td rowspan="3">
1909      <ul>
1910       <li>n/a
1911      </ul>
1912  <td>NELogicalAnd
1913  <td>
1914      <ul>
1915       <li>All
1916      </ul>
1917  <td>
1918    <table>
1919    <tr><th>src0<th>src1<th>dst
1920    <tr><td>U8<td>U8<td>U8
1921    </table>
1922<tr>
1923  <td>NELogicalOr
1924  <td>
1925      <ul>
1926       <li>All
1927      </ul>
1928  <td>
1929    <table>
1930    <tr><th>src0<th>src1<th>dst
1931    <tr><td>U8<td>U8<td>U8
1932    </table>
1933<tr>
1934  <td>NELogicalNot
1935  <td>
1936      <ul>
1937       <li>All
1938      </ul>
1939  <td>
1940    <table>
1941    <tr><th>src<th>dst
1942    <tr><td>U8<td>U8
1943    </table>
1944<tr>
1945  <td rowspan="1">LogicalAnd
1946  <td rowspan="1" style="width:200px;"> Function to perform Logical AND.
1947  <td rowspan="1">
1948      <ul>
1949       <li>n/a
1950      </ul>
1951  <td>CLLogicalAnd
1952  <td>
1953      <ul>
1954       <li>All
1955      </ul>
1956  <td>
1957    <table>
1958    <tr><th>src0<th>src1<th>dst
1959    <tr><td>U8<td>U8<td>U8
1960    </table>
1961<tr>
1962  <td rowspan="1">LogicalOr
1963  <td rowspan="1" style="width:200px;"> Function to perform Logical OR.
1964  <td rowspan="1">
1965      <ul>
1966       <li>n/a
1967      </ul>
1968  <td>CLLogicalOr
1969  <td>
1970      <ul>
1971       <li>All
1972      </ul>
1973  <td>
1974    <table>
1975    <tr><th>src0<th>src1<th>dst
1976    <tr><td>U8<td>U8<td>U8
1977    </table>
1978<tr>
1979  <td rowspan="1">LogicalNot
1980  <td rowspan="1" style="width:200px;"> Function to perform Logical NOT.
1981  <td rowspan="1">
1982      <ul>
1983       <li>n/a
1984      </ul>
1985  <td>CLLogicalNot
1986  <td>
1987      <ul>
1988       <li>All
1989      </ul>
1990  <td>
1991    <table>
1992    <tr><th>src<th>dst
1993    <tr><td>U8<td>U8
1994    </table>
1995<tr>
1996  <td rowspan="2">LSTMLayer
1997  <td rowspan="2" style="width:200px;"> Function to perform a single time step in a Long Short-Term Memory (LSTM) layer.
1998  <td rowspan="2">
1999      <ul>
2000       <li>ANEURALNETWORKS_LSTM
2001      </ul>
2002  <td>NELSTMLayer
2003  <td>
2004      <ul>
2005       <li>All
2006      </ul>
2007  <td>
2008    <table>
2009    <tr><th>src0 - src13<th>dst0 - dst3
2010    <tr><td>F16<td>F16
2011    <tr><td>F32<td>F32
2012    </table>
2013<tr>
2014  <td>CLLSTMLayer
2015  <td>
2016      <ul>
2017       <li>All
2018      </ul>
2019  <td>
2020    <table>
2021    <tr><th>src0 - src13<th>dst0 - dst3
2022    <tr><td>F16<td>F16
2023    <tr><td>F32<td>F32
2024    </table>
2025<tr>
2026  <td rowspan="2">LSTMLayerQuantized
  <td rowspan="2" style="width:200px;"> Function to perform quantized LSTM (Long Short-Term Memory).
2028  <td rowspan="2">
2029      <ul>
2030       <li>ANEURALNETWORKS_QUANTIZED_LSTM
2031       <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2032      </ul>
2033  <td>NELSTMLayerQuantized
2034  <td>
2035      <ul>
2036       <li>All
2037      </ul>
2038  <td>
2039    <table>
2040    <tr><th>src0 - src8<th>src9 - src12<th>src13<th>src14<th>dst0<th>dst1
2041    <tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
2042    </table>
2043<tr>
2044  <td>CLLSTMLayerQuantized
2045  <td>
2046      <ul>
2047       <li>All
2048      </ul>
2049  <td>
2050    <table>
2051    <tr><th>src0 - src8<th>src9 - src12<th>src13<th>src14<th>dst0<th>dst1
2052    <tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
2053    </table>
2054<tr>
2055  <td rowspan="2">MaxUnpoolingLayer
2056  <td rowspan="2" style="width:200px;"> Function to perform MaxUnpooling.
2057  <td rowspan="2">
2058      <ul>
2059       <li>n/a
2060      </ul>
2061  <td>NEMaxUnpoolingLayer
2062  <td>
2063      <ul>
2064       <li>NHWC
2065       <li>NCHW
2066      </ul>
2067  <td>
2068    <table>
2069    <tr><th>src<th>dst
2070    <tr><td>QASYMM8<td>QASYMM8
2071    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2072    <tr><td>F16<td>F16
2073    <tr><td>F32<td>F32
2074    </table>
2075<tr>
2076  <td>CLMaxUnpoolingLayer
2077  <td>
2078      <ul>
2079       <li>NHWC
2080       <li>NCHW
2081      </ul>
2082  <td>
2083    <table>
2084    <tr><th>src<th>dst
2085    <tr><td>QASYMM8<td>QASYMM8
2086    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2087    <tr><td>F16<td>F16
2088    <tr><td>F32<td>F32
2089    </table>
2090<tr>
2091  <td rowspan="2">MeanStdDevNormalizationLayer
2092  <td rowspan="2" style="width:200px;"> Function to execute mean and standard deviation normalization.
2093  <td rowspan="2">
2094      <ul>
2095       <li>n/a
2096      </ul>
2097  <td>NEMeanStdDevNormalizationLayer
2098  <td>
2099      <ul>
2100       <li>NHWC
2101       <li>NCHW
2102      </ul>
2103  <td>
2104    <table>
2105    <tr><th>src<th>dst
2106    <tr><td>F32<td>F32
2107    <tr><td>F16<td>F16
2108    </table>
2109<tr>
2110  <td>CLMeanStdDevNormalizationLayer
2111  <td>
2112      <ul>
2113       <li>NHWC
2114       <li>NCHW
2115      </ul>
2116  <td>
2117    <table>
2118    <tr><th>src<th>dst
2119    <tr><td>F32<td>F32
2120    <tr><td>F16<td>F16
2121    </table>
2122<tr>
2123  <td rowspan="2">NormalizationLayer
2124  <td rowspan="2" style="width:200px;"> Function to compute normalization layer.
2125  <td rowspan="2">
2126      <ul>
2127       <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2128      </ul>
2129  <td>NENormalizationLayer
2130  <td>
2131      <ul>
2132       <li>NHWC
2133       <li>NCHW
2134      </ul>
2135  <td>
2136    <table>
2137    <tr><th>src<th>dst
2138    <tr><td>F32<td>F32
2139    <tr><td>F16<td>F16
2140    </table>
2141<tr>
2142  <td>CLNormalizationLayer
2143  <td>
2144      <ul>
2145       <li>NHWC
2146       <li>NCHW
2147      </ul>
2148  <td>
2149    <table>
2150    <tr><th>src<th>dst
2151    <tr><td>F32<td>F32
2152    <tr><td>F16<td>F16
2153    </table>
2154<tr>
2155  <td rowspan="2">PadLayer
2156  <td rowspan="2" style="width:200px;"> Function to pad a tensor.
2157  <td rowspan="2">
2158      <ul>
2159       <li>ANEURALNETWORKS_PAD
2160       <li>ANEURALNETWORKS_PAD_V2
2161      </ul>
2162  <td>NEPadLayer
2163  <td>
2164      <ul>
2165       <li>NHWC
2166       <li>NCHW
2167      </ul>
2168  <td>
2169    <table>
2170    <tr><th>src<th>dst
2171    <tr><td>All<td>All
2172    </table>
2173<tr>
2174  <td>CLPadLayer
2175  <td>
2176      <ul>
2177       <li>NHWC
2178       <li>NCHW
2179      </ul>
2180  <td>
2181    <table>
2182    <tr><th>src<th>dst
2183    <tr><td>All<td>All
2184    </table>
2185<tr>
2186  <td rowspan="2">Permute
2187  <td rowspan="2" style="width:200px;"> Function to transpose an ND tensor.
2188  <td rowspan="2">
2189      <ul>
2190       <li>ANEURALNETWORKS_TRANSPOSE
2191      </ul>
2192  <td>NEPermute
2193  <td>
2194      <ul>
2195       <li>NHWC
2196       <li>NCHW
2197      </ul>
2198  <td>
2199    <table>
2200    <tr><th>src<th>dst
2201    <tr><td>All<td>All
2202    </table>
2203<tr>
2204  <td>CLPermute
2205  <td>
2206      <ul>
2207       <li>NHWC
2208       <li>NCHW
2209      </ul>
2210  <td>
2211    <table>
2212    <tr><th>src<th>dst
2213    <tr><td>All<td>All
2214    </table>
2215<tr>
2216  <td rowspan="2">PixelWiseMultiplication
2217  <td rowspan="2" style="width:200px;"> Function to perform a pixel-wise (element-wise) multiplication.
2218  <td rowspan="2">
2219      <ul>
2220       <li>ANEURALNETWORKS_MUL
2221      </ul>
2222  <td>NEPixelWiseMultiplication
2223  <td>
2224      <ul>
2225       <li>All
2226      </ul>
2227  <td>
2228    <table>
2229    <tr><th>src0<th>src1<th>dst
2230    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
2231    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2232    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
2233    <tr><td>QSYMM16<td>QSYMM16<td>S32
2234    <tr><td>U8<td>U8<td>U8
2235    <tr><td>U8<td>U8<td>S16
2236    <tr><td>U8<td>S16<td>S16
2237    <tr><td>S16<td>U8<td>S16
2238    <tr><td>S16<td>S16<td>S16
2239    <tr><td>F16<td>F16<td>F16
2240    <tr><td>F32<td>F32<td>F32
2241    </table>
2242<tr>
2243  <td>CLPixelWiseMultiplication
2244  <td>
2245      <ul>
2246       <li>All
2247      </ul>
2248  <td>
2249    <table>
2250    <tr><th>src0<th>src1<th>dst
2251    <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
2252    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2253    <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
2254    <tr><td>QSYMM16<td>QSYMM16<td>S32
2255    <tr><td>U8<td>U8<td>U8
2256    <tr><td>U8<td>U8<td>S16
2257    <tr><td>U8<td>S16<td>S16
2258    <tr><td>S16<td>U8<td>S16
2259    <tr><td>S16<td>S16<td>S16
2260    <tr><td>F16<td>F16<td>F16
2261    <tr><td>F32<td>F32<td>F32
2262    <tr><td>S32<td>S32<td>S32
2263    </table>
2264<tr>
2265  <td rowspan="2">PoolingLayer
2266  <td rowspan="2" style="width:200px;"> Function to perform pooling with the specified pooling operation.
2267  <td rowspan="2">
2268      <ul>
2269       <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2270       <li>ANEURALNETWORKS_L2_POOL_2D
2271       <li>ANEURALNETWORKS_MAX_POOL_2D
2272      </ul>
2273  <td>NEPoolingLayer
2274  <td>
2275      <ul>
2276       <li>NHWC
2277       <li>NCHW
2278      </ul>
2279  <td>
2280    <table>
2281    <tr><th>src<th>dst
2282    <tr><td>QASYMM8<td>QASYMM8
2283    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2284    <tr><td>F16<td>F16
2285    <tr><td>F32<td>F32
2286    </table>
2287<tr>
2288  <td>CLPoolingLayer
2289  <td>
2290      <ul>
2291       <li>NHWC
2292       <li>NCHW
2293      </ul>
2294  <td>
2295    <table>
2296    <tr><th>src<th>dst
2297    <tr><td>QASYMM8<td>QASYMM8
2298    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2299    <tr><td>F16<td>F16
2300    <tr><td>F32<td>F32
2301    </table>
2302<tr>
2303  <td rowspan="2">Pooling3dLayer
2304  <td rowspan="2" style="width:200px;"> Function to perform 3D pooling with the specified pooling operation.
2305  <td rowspan="2">
2306      <ul>
2307       <li>n/a
2308      </ul>
2309  <td>NEPooling3dLayer
2310  <td>
2311      <ul>
2312       <li>NDHWC
2313      </ul>
2314  <td>
2315    <table>
2316    <tr><th>src<th>dst
2317    <tr><td>F16<td>F16
2318    <tr><td>F32<td>F32
2319    <tr><td>QASYMM8<td>QASYMM8
2320    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2321    </table>
2322<tr>
2323  <td>CLPooling3dLayer
2324  <td>
2325      <ul>
2326       <li>NDHWC
2327      </ul>
2328  <td>
2329    <table>
2330    <tr><th>src<th>dst
2331    <tr><td>F16<td>F16
2332    <tr><td>F32<td>F32
2333    <tr><td>QASYMM8<td>QASYMM8
2334    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2335    </table>
2336<tr>
2337  <td rowspan="2">PReluLayer
2338  <td rowspan="2" style="width:200px;"> Function to compute the activation layer with the PRELU activation function.
2339  <td rowspan="2">
2340      <ul>
2341       <li>ANEURALNETWORKS_PRELU
2342      </ul>
2343  <td>NEPReluLayer
2344  <td>
2345      <ul>
2346       <li>All
2347      </ul>
2348  <td>
2349    <table>
2350    <tr><th>src<th>dst
2351    <tr><td>QASYMM8<td>QASYMM8
2352    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2353    <tr><td>F16<td>F16
2354    <tr><td>F32<td>F32
2355    </table>
2356<tr>
2357  <td>CLPReluLayer
2358  <td>
2359      <ul>
2360       <li>All
2361      </ul>
2362  <td>
2363    <table>
2364    <tr><th>src<th>dst
2365    <tr><td>QASYMM8<td>QASYMM8
2366    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2367    <tr><td>F16<td>F16
2368    <tr><td>F32<td>F32
2369    </table>
2370<tr>
2371  <td rowspan="2">PriorBoxLayer
2372  <td rowspan="2" style="width:200px;"> Function to compute prior boxes and clip.
2373  <td rowspan="2">
2374      <ul>
2375       <li>n/a
2376      </ul>
2377  <td>NEPriorBoxLayer
2378  <td>
2379      <ul>
2380       <li>NHWC
2381       <li>NCHW
2382      </ul>
2383  <td>
2384    <table>
2385    <tr><th>src0<th>src1<th>dst
2386    <tr><td>F32<td>F32<td>F32
2387    </table>
2388<tr>
2389  <td>CLPriorBoxLayer
2390  <td>
2391      <ul>
2392       <li>NHWC
2393       <li>NCHW
2394      </ul>
2395  <td>
2396    <table>
2397    <tr><th>src0<th>src1<th>dst
2398    <tr><td>F32<td>F32<td>F32
2399    </table>
2400<tr>
2401  <td rowspan="2">QLSTMLayer
2402  <td rowspan="2" style="width:200px;"> Function to perform quantized LSTM (Long Short-Term Memory).
2403  <td rowspan="2">
2404      <ul>
2405       <li>ANEURALNETWORKS_QUANTIZED_LSTM
2406       <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2407      </ul>
2408  <td>NEQLSTMLayer
2409  <td>
2410      <ul>
2411       <li>All
2412      </ul>
2413  <td>
2414    <table>
2415    <tr><th>src0<th>src1 - src6<th>src7 - src9<th>src10<th>src11<th>dst0<th>dst1 - dst2
2416    <tr><td>QASYMM8_SIGNED<td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8_SIGNED<td>QSYMM16<td>QASYMM8_SIGNED
2417    </table>
2418<tr>
2419  <td>CLQLSTMLayer
2420  <td>
2421      <ul>
2422       <li>All
2423      </ul>
2424  <td>
2425    <table>
2426    <tr><th>src0<th>src1 - src6<th>src7 - src9<th>src10<th>src11<th>dst0<th>dst1 - dst2
2427    <tr><td>QASYMM8_SIGNED<td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8_SIGNED<td>QSYMM16<td>QASYMM8_SIGNED
2428    </table>
2429<tr>
2430  <td rowspan="2">QuantizationLayer
2431  <td rowspan="2" style="width:200px;"> Function to perform a quantization layer.
2432  <td rowspan="2">
2433      <ul>
2434       <li>ANEURALNETWORKS_QUANTIZE
2435      </ul>
2436  <td>NEQuantizationLayer
2437  <td>
2438      <ul>
2439       <li>All
2440      </ul>
2441  <td>
2442    <table>
2443    <tr><th>src<th>dst
2444    <tr><td>QASYMM8<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2445    <tr><td>QASYMM8_SIGNED<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2446    <tr><td>F16<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2447    <tr><td>F32<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2448    </table>
2449<tr>
2450  <td>CLQuantizationLayer
2451  <td>
2452      <ul>
2453       <li>All
2454      </ul>
2455  <td>
2456    <table>
2457    <tr><th>src<th>dst
2458    <tr><td>QASYMM8<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2459    <tr><td>QASYMM8_SIGNED<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2460    <tr><td>F16<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2461    <tr><td>F32<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2462    </table>
2463<tr>
2464  <td rowspan="2">Range
2465  <td rowspan="2" style="width:200px;"> Function to generate a sequence of numbers starting from START, extending by increments of 'STEP' up to but not including 'END'.
2466  <td rowspan="2">
2467      <ul>
2468       <li>n/a
2469      </ul>
2470  <td>NERange
2471  <td>
2472      <ul>
2473       <li>All
2474      </ul>
2475  <td>
2476    <table>
2477    <tr><th>dst
2478    <tr><td>U8
2479    <tr><td>S8
2480    <tr><td>U16
2481    <tr><td>S16
2482    <tr><td>U32
2483    <tr><td>S32
2484    <tr><td>F16
2485    <tr><td>F32
2486    </table>
2487<tr>
2488  <td>CLRange
2489  <td>
2490      <ul>
2491       <li>All
2492      </ul>
2493  <td>
2494    <table>
2495    <tr><th>dst
2496    <tr><td>U8
2497    <tr><td>S8
2498    <tr><td>QASYMM8
2499    <tr><td>U16
2500    <tr><td>S16
2501    <tr><td>U32
2502    <tr><td>S32
2503    <tr><td>F16
2504    <tr><td>F32
2505    </table>
2506<tr>
2507  <td rowspan="2">ReduceMean
2508  <td rowspan="2" style="width:200px;"> Function to perform a reduce mean operation.
2509  <td rowspan="2">
2510      <ul>
2511       <li>ANEURALNETWORKS_MEAN
2512      </ul>
2513  <td>NEReduceMean
2514  <td>
2515      <ul>
2516       <li>All
2517      </ul>
2518  <td>
2519    <table>
2520    <tr><th>src<th>dst
2521    <tr><td>QASYMM8<td>QASYMM8
2522    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2523    <tr><td>F16<td>F16
2524    <tr><td>F32<td>F32
2525    </table>
2526<tr>
2527  <td>CLReduceMean
2528  <td>
2529      <ul>
2530       <li>All
2531      </ul>
2532  <td>
2533    <table>
2534    <tr><th>src<th>dst
2535    <tr><td>QASYMM8<td>QASYMM8
2536    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2537    <tr><td>F16<td>F16
2538    <tr><td>F32<td>F32
2539    </table>
2540<tr>
2541  <td rowspan="2">ReductionOperation
2542  <td rowspan="2" style="width:200px;"> Function to perform a reduction with one of the following operations: ARG_IDX_MAX (index of the max value), ARG_IDX_MIN (index of the min value), MEAN_SUM (mean of sum), PROD (product), SUM_SQUARE (sum of squares), SUM (sum), MIN (min), MAX (max).
2543  <td rowspan="2">
2544      <ul>
2545       <li>ANEURALNETWORKS_REDUCE_ALL
2546       <li>ANEURALNETWORKS_REDUCE_ANY
2547       <li>ANEURALNETWORKS_REDUCE_MAX
2548       <li>ANEURALNETWORKS_REDUCE_MIN
2549       <li>ANEURALNETWORKS_REDUCE_PROD
2550       <li>ANEURALNETWORKS_REDUCE_SUM
2551      </ul>
2552  <td>NEReductionOperation
2553  <td>
2554      <ul>
2555       <li>All
2556      </ul>
2557  <td>
2558    <table>
2559    <tr><th>src<th>dst
2560    <tr><td>QASYMM8<td>QASYMM8
2561    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2562    <tr><td>F16<td>F16
2563    <tr><td>F32<td>F32
2564    <tr><td>S32<td>S32
2565    </table>
2566<tr>
2567  <td>CLReductionOperation
2568  <td>
2569      <ul>
2570       <li>All
2571      </ul>
2572  <td>
2573    <table>
2574    <tr><th>src<th>dst
2575    <tr><td>QASYMM8<td>QASYMM8
2576    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2577    <tr><td>F16<td>F16
2578    <tr><td>F32<td>F32
2579    <tr><td>S32<td>S32
2580    </table>
2581<tr>
2582  <td rowspan="2">ReorgLayer
2583  <td rowspan="2" style="width:200px;"> Function to perform a reorganization of the input tensor into the output tensor (reorg layer).
2584  <td rowspan="2">
2585      <ul>
2586       <li>n/a
2587      </ul>
2588  <td>NEReorgLayer
2589  <td>
2590      <ul>
2591       <li>NHWC
2592       <li>NCHW
2593      </ul>
2594  <td>
2595    <table>
2596    <tr><th>src<th>dst
2597    <tr><td>All<td>All
2598    </table>
2599<tr>
2600  <td>CLReorgLayer
2601  <td>
2602      <ul>
2603       <li>NHWC
2604       <li>NCHW
2605      </ul>
2606  <td>
2607    <table>
2608    <tr><th>src<th>dst
2609    <tr><td>All<td>All
2610    </table>
2611<tr>
2612  <td rowspan="2">ReshapeLayer
2613  <td rowspan="2" style="width:200px;"> Function to reshape a tensor.
2614  <td rowspan="2">
2615      <ul>
2616       <li>ANEURALNETWORKS_RESHAPE
2617       <li>ANEURALNETWORKS_SQUEEZE
2618      </ul>
2619  <td>NEReshapeLayer
2620  <td>
2621      <ul>
2622       <li>All
2623      </ul>
2624  <td>
2625    <table>
2626    <tr><th>src<th>dst
2627    <tr><td>All<td>All
2628    </table>
2629<tr>
2630  <td>CLReshapeLayer
2631  <td>
2632      <ul>
2633       <li>All
2634      </ul>
2635  <td>
2636    <table>
2637    <tr><th>src<th>dst
2638    <tr><td>All<td>All
2639    </table>
2640<tr>
2641  <td rowspan="2">Reverse
2642  <td rowspan="2" style="width:200px;"> Function to reverse a tensor according to the given axes.
2643  <td rowspan="2">
2644      <ul>
2645       <li>n/a
2646      </ul>
2647  <td>NEReverse
2648  <td>
2649      <ul>
2650       <li>All
2651      </ul>
2652  <td>
2653    <table>
2654    <tr><th>src0<th>src1<th>dst
2655    <tr><td>All<td>U32<td>All
2656    </table>
2657<tr>
2658  <td>CLReverse
2659  <td>
2660      <ul>
2661       <li>All
2662      </ul>
2663  <td>
2664    <table>
2665    <tr><th>src0<th>src1<th>dst
2666    <tr><td>All<td>U32<td>All
2667    </table>
2668<tr>
2669  <td rowspan="2">RNNLayer
2670  <td rowspan="2" style="width:200px;"> Function to perform a recurrent neural network (RNN) layer.
2671  <td rowspan="2">
2672      <ul>
2673       <li>ANEURALNETWORKS_RNN
2674      </ul>
2675  <td>NERNNLayer
2676  <td>
2677      <ul>
2678       <li>NHWC
2679       <li>NCHW
2680      </ul>
2681  <td>
2682    <table>
2683    <tr><th>src0<th>src1<th>src2<th>src3<th>dst0<th>dst1
2684    <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
2685    <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
2686    </table>
2687<tr>
2688  <td>CLRNNLayer
2689  <td>
2690      <ul>
2691       <li>NHWC
2692       <li>NCHW
2693      </ul>
2694  <td>
2695    <table>
2696    <tr><th>src0<th>src1<th>src2<th>src3<th>dst0<th>dst1
2697    <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
2698    <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
2699    </table>
2700<tr>
2701  <td rowspan="2">ROIAlignLayer
2702  <td rowspan="2" style="width:200px;"> Function to perform ROI alignment.
2703  <td rowspan="2">
2704      <ul>
2705       <li>ANEURALNETWORKS_ROI_ALIGN
2706      </ul>
2707  <td>NEROIAlignLayer
2708  <td>
2709      <ul>
2710       <li>All
2711      </ul>
2712  <td>
2713    <table>
2714    <tr><th>src0<th>src1<th>dst
2715    <tr><td>F16<td>F16<td>F16
2716    <tr><td>F32<td>F32<td>F32
2717    <tr><td>QASYMM8<td>QASYMM16<td>QASYMM8
2718    <tr><td>QASYMM8_SIGNED<td>QASYMM16<td>QASYMM8_SIGNED
2719    </table>
2720<tr>
2721  <td>CLROIAlignLayer
2722  <td>
2723      <ul>
2724       <li>All
2725      </ul>
2726  <td>
2727    <table>
2728    <tr><th>src0<th>src1<th>dst
2729    <tr><td>F16<td>F16<td>F16
2730    <tr><td>F32<td>F32<td>F32
2731    <tr><td>QASYMM8<td>QASYMM16<td>QASYMM8
2732    <tr><td>QASYMM8_SIGNED<td>QASYMM16<td>QASYMM8_SIGNED
2733    </table>
2734<tr>
2735  <td rowspan="2">ROIPoolingLayer
2736  <td rowspan="2" style="width:200px;"> Function to perform ROI pooling.
2737  <td rowspan="2">
2738      <ul>
2739       <li>ANEURALNETWORKS_ROI_POOLING
2740      </ul>
2741  <td>NEROIPoolingLayer
2742  <td>
2743      <ul>
2744       <li>All
2745      </ul>
2746  <td>
2747    <table>
2748    <tr><th>src0<th>src1<th>dst
2749    <tr><td>F32<td>U16<td>F32
2750    <tr><td>QASYMM8<td>U16<td>QASYMM8
2751    </table>
2752<tr>
2753  <td>CLROIPoolingLayer
2754  <td>
2755      <ul>
2756       <li>All
2757      </ul>
2758  <td>
2759    <table>
2760    <tr><th>src0<th>src1<th>dst
2761    <tr><td>F16<td>U16<td>F16
2762    <tr><td>F32<td>U16<td>F32
2763    <tr><td>QASYMM8<td>U16<td>QASYMM8
2764    </table>
2765<tr>
2766  <td rowspan="2">Scale
2767  <td rowspan="2" style="width:200px;"> Function to resize a tensor using one of the following interpolation methods: Bilinear or Nearest neighbor.
2768  <td rowspan="2">
2769      <ul>
2770       <li>ANEURALNETWORKS_RESIZE_BILINEAR
2771       <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2772      </ul>
2773  <td>NEScale
2774  <td>
2775      <ul>
2776       <li>NHWC
2777       <li>NCHW
2778      </ul>
2779  <td>
2780    <table>
2781    <tr><th>src<th>dst
2782    <tr><td>QASYMM8<td>QASYMM8
2783    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2784    <tr><td>F16<td>F16
2785    <tr><td>F32<td>F32
2786    <tr><td>U8<td>U8
2787    <tr><td>S8<td>S8
2788    <tr><td>S16<td>S16
2789    </table>
2790<tr>
2791  <td>CLScale
2792  <td>
2793      <ul>
2794       <li>NHWC
2795       <li>NCHW
2796      </ul>
2797  <td>
2798    <table>
2799    <tr><th>src<th>dst
2800    <tr><td>QASYMM8<td>QASYMM8
2801    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2802    <tr><td>F16<td>F16
2803    <tr><td>F32<td>F32
2804    <tr><td>U8<td>U8
2805    <tr><td>S16<td>S16
2806    </table>
2807<tr>
2808  <td rowspan="2">Select
2809  <td rowspan="2" style="width:200px;"> Function to select values from 2 tensors depending on an input tensor of booleans.
2810  <td rowspan="2">
2811      <ul>
2812       <li>ANEURALNETWORKS_SELECT
2813      </ul>
2814  <td>NESelect
2815  <td>
2816      <ul>
2817       <li>All
2818      </ul>
2819  <td>
2820    <table>
2821    <tr><th>src0<th>src1<th>src2<th>dst
2822    <tr><td>U8<td>All<td>All<td>All
2823    </table>
2824<tr>
2825  <td>CLSelect
2826  <td>
2827      <ul>
2828       <li>All
2829      </ul>
2830  <td>
2831    <table>
2832    <tr><th>src0<th>src1<th>src2<th>dst
2833    <tr><td>U8<td>All<td>All<td>All
2834    </table>
2835<tr>
2836  <td rowspan="2">Slice
2837  <td rowspan="2" style="width:200px;"> Function to perform tensor slicing.
2838  <td rowspan="2">
2839      <ul>
2840       <li>ANEURALNETWORKS_SLICE
2841      </ul>
2842  <td>NESlice
2843  <td>
2844      <ul>
2845       <li>All
2846      </ul>
2847  <td>
2848    <table>
2849    <tr><th>src<th>dst
2850    <tr><td>All<td>All
2851    </table>
2852<tr>
2853  <td>CLSlice
2854  <td>
2855      <ul>
2856       <li>All
2857      </ul>
2858  <td>
2859    <table>
2860    <tr><th>src<th>dst
2861    <tr><td>All<td>All
2862    </table>
2863<tr>
2864  <td rowspan="2">SoftmaxLayer
2865  <td rowspan="2" style="width:200px;"> Function to compute a SoftmaxLayer and a Log SoftmaxLayer.
2866  <td rowspan="2">
2867      <ul>
2868       <li>ANEURALNETWORKS_LOG_SOFTMAX
2869       <li>ANEURALNETWORKS_SOFTMAX
2870      </ul>
2871  <td>NESoftmaxLayerGeneric
2872  <td>
2873      <ul>
2874       <li>All
2875      </ul>
2876  <td>
2877    <table>
2878    <tr><th>src<th>dst
2879    <tr><td>QASYMM8<td>QASYMM8
2880    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2881    <tr><td>F16<td>F16
2882    <tr><td>F32<td>F32
2883    </table>
2884<tr>
2885  <td>CLSoftmaxLayerGeneric
2886  <td>
2887      <ul>
2888       <li>All
2889      </ul>
2890  <td>
2891    <table>
2892    <tr><th>src<th>dst
2893    <tr><td>QASYMM8<td>QASYMM8
2894    <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2895    <tr><td>F16<td>F16
2896    <tr><td>F32<td>F32
2897    </table>
2898<tr>
2899  <td rowspan="2">SpaceToBatchLayer
2900  <td rowspan="2" style="width:200px;"> Function to rearrange blocks of spatial data into the batch dimension.
2901  <td rowspan="2">
2902      <ul>
2903       <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
2904      </ul>
2905  <td>NESpaceToBatchLayer
2906  <td>
2907      <ul>
2908       <li>NHWC
2909       <li>NCHW
2910      </ul>
2911  <td>
2912    <table>
2913    <tr><th>src0<th>src1<th>src2<th>dst
2914    <tr><td>All<td>S32<td>S32<td>All
2915    </table>
2916<tr>
2917  <td>CLSpaceToBatchLayer
2918  <td>
2919      <ul>
2920       <li>NHWC
2921       <li>NCHW
2922      </ul>
2923  <td>
2924    <table>
2925    <tr><th>src0<th>src1<th>src2<th>dst
2926    <tr><td>All<td>S32<td>S32<td>All
2927    </table>
2928<tr>
2929  <td rowspan="2">SpaceToDepthLayer
2930  <td rowspan="2" style="width:200px;"> Function to rearrange blocks of spatial data into depth.
2931  <td rowspan="2">
2932      <ul>
2933       <li>ANEURALNETWORKS_SPACE_TO_DEPTH
2934      </ul>
2935  <td>NESpaceToDepthLayer
2936  <td>
2937      <ul>
2938       <li>NHWC
2939       <li>NCHW
2940      </ul>
2941  <td>
2942    <table>
2943    <tr><th>src<th>dst
2944    <tr><td>All<td>All
2945    </table>
2946<tr>
2947  <td>CLSpaceToDepthLayer
2948  <td>
2949      <ul>
2950       <li>NHWC
2951       <li>NCHW
2952      </ul>
2953  <td>
2954    <table>
2955    <tr><th>src<th>dst
2956    <tr><td>All<td>All
2957    </table>
2958<tr>
2959  <td rowspan="2">Split
2960  <td rowspan="2" style="width:200px;"> Function to split a tensor along a given axis.
2961  <td rowspan="2">
2962      <ul>
2963       <li>ANEURALNETWORKS_SPLIT
2964      </ul>
2965  <td>NESplit
2966  <td>
2967      <ul>
2968       <li>All
2969      </ul>
2970  <td>
2971    <table>
2972    <tr><th>src<th>dst
2973    <tr><td>All<td>All
2974    </table>
2975<tr>
2976  <td>CLSplit
2977  <td>
2978      <ul>
2979       <li>All
2980      </ul>
2981  <td>
2982    <table>
2983    <tr><th>src<th>dst
2984    <tr><td>All<td>All
2985    </table>
2986<tr>
2987  <td rowspan="2">StackLayer
2988  <td rowspan="2" style="width:200px;"> Function to stack tensors along an axis.
2989  <td rowspan="2">
2990      <ul>
2991       <li>n/a
2992      </ul>
2993  <td>NEStackLayer
2994  <td>
2995      <ul>
2996       <li>All
2997      </ul>
2998  <td>
2999    <table>
3000    <tr><th>src<th>dst
3001    <tr><td>All<td>All
3002    </table>
3003<tr>
3004  <td>CLStackLayer
3005  <td>
3006      <ul>
3007       <li>All
3008      </ul>
3009  <td>
3010    <table>
3011    <tr><th>src<th>dst
3012    <tr><td>All<td>All
3013    </table>
3014<tr>
3015  <td rowspan="2">StridedSlice
3016  <td rowspan="2" style="width:200px;"> Function to extract a strided slice of a tensor.
3017  <td rowspan="2">
3018      <ul>
3019       <li>ANEURALNETWORKS_STRIDED_SLICE
3020      </ul>
3021  <td>NEStridedSlice
3022  <td>
3023      <ul>
3024       <li>All
3025      </ul>
3026  <td>
3027    <table>
3028    <tr><th>src<th>dst
3029    <tr><td>All<td>All
3030    </table>
3031<tr>
3032  <td>CLStridedSlice
3033  <td>
3034      <ul>
3035       <li>All
3036      </ul>
3037  <td>
3038    <table>
3039    <tr><th>src<th>dst
3040    <tr><td>All<td>All
3041    </table>
3042<tr>
3043  <td rowspan="2">Tile
3044  <td rowspan="2" style="width:200px;"> Function to construct a tensor by tiling a given tensor.
3045  <td rowspan="2">
3046      <ul>
3047       <li>ANEURALNETWORKS_TILE
3048      </ul>
3049  <td>NETile
3050  <td>
3051      <ul>
3052       <li>All
3053      </ul>
3054  <td>
3055    <table>
3056    <tr><th>src<th>dst
3057    <tr><td>All<td>All
3058    </table>
3059<tr>
3060  <td>CLTile
3061  <td>
3062      <ul>
3063       <li>All
3064      </ul>
3065  <td>
3066    <table>
3067    <tr><th>src<th>dst
3068    <tr><td>All<td>All
3069    </table>
3070<tr>
3071  <td rowspan="2">Transpose
3072  <td rowspan="2" style="width:200px;"> Function to transpose a 2D tensor.
3073  <td rowspan="2">
3074      <ul>
3075       <li>ANEURALNETWORKS_TRANSPOSE
3076      </ul>
3077  <td>NETranspose
3078  <td>
3079      <ul>
3080       <li>All
3081      </ul>
3082  <td>
3083    <table>
3084    <tr><th>src<th>dst
3085    <tr><td>All<td>All
3086    </table>
3087<tr>
3088  <td>CLTranspose
3089  <td>
3090      <ul>
3091       <li>All
3092      </ul>
3093  <td>
3094    <table>
3095    <tr><th>src<th>dst
3096    <tr><td>All<td>All
3097    </table>
3098<tr>
3099  <td rowspan="2">Unstack
3100  <td rowspan="2" style="width:200px;"> Function to unpack a rank-R tensor into rank-(R-1) tensors.
3101  <td rowspan="2">
3102      <ul>
3103       <li>n/a
3104      </ul>
3105  <td>NEUnstack
3106  <td>
3107      <ul>
3108       <li>All
3109      </ul>
3110  <td>
3111    <table>
3112    <tr><th>src<th>dst
3113    <tr><td>All<td>All
3114    </table>
3115<tr>
3116  <td>CLUnstack
3117  <td>
3118      <ul>
3119       <li>All
3120      </ul>
3121  <td>
3122    <table>
3123    <tr><th>src<th>dst
3124    <tr><td>All<td>All
3125    </table>
3126<tr>
3127  <td rowspan="2">WinogradConvolutionLayer
3128  <td rowspan="2" style="width:200px;"> Function to perform a convolution using the Winograd algorithm.
3129  <td rowspan="2">
3130      <ul>
3131       <li>ANEURALNETWORKS_CONV_2D
3132      </ul>
3133  <td>NEWinogradConvolutionLayer
3134  <td>
3135      <ul>
3136       <li>NHWC
3137       <li>NCHW
3138      </ul>
3139  <td>
3140    <table>
3141    <tr><th>src0<th>src1<th>src2<th>dst
3142    <tr><td>F16<td>F16<td>F16<td>F16
3143    <tr><td>F32<td>F32<td>F32<td>F32
3144    </table>
3145<tr>
3146  <td>CLWinogradConvolutionLayer
3147  <td>
3148      <ul>
3149       <li>NHWC
3150       <li>NCHW
3151      </ul>
3152  <td>
3153    <table>
3154    <tr><th>src0<th>src1<th>src2<th>dst
3155    <tr><td>F16<td>F16<td>F16<td>F16
3156    <tr><td>F32<td>F32<td>F32<td>F32
3157    </table>
3158</table>
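
Most of the functions listed above follow the same basic runtime pattern: initialise the tensor metadata, call configure() on the function object, allocate the tensors' backing memory and call run(). The snippet below is a minimal sketch of this pattern using NETranspose from the table; the 8x4 F32 tensor shapes are arbitrary example values rather than a requirement of the operator. The CL* variants typically follow the same configure()/run() pattern with CLTensor objects, after the OpenCL backend has been initialised with CLScheduler::get().default_init().

@code{.cpp}
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/NEON/functions/NETranspose.h"

using namespace arm_compute;

int main()
{
    // Describe a 2D 8x4 F32 source tensor and its 4x8 transposed destination
    // (example shapes only).
    Tensor src{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(4U, 8U), 1, DataType::F32));

    // Configure the function with the source and destination tensors.
    NETranspose transpose{};
    transpose.configure(&src, &dst);

    // Allocate the backing memory, then execute the operator.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src, e.g. through src.buffer() or a Window/Iterator pair ...
    transpose.run();

    return 0;
}
@endcode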
3159
3160*/
3161} // namespace
3162