1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * @addtogroup NeuralNetworks
19  * @{
20  */
21 
22 /**
23  * @file NeuralNetworks.h
24  */
25 
26 #ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_H
27 #define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_H
28 
29 /******************************************************************
30  *
31  * IMPORTANT NOTICE:
32  *
33  *   This file is part of Android's set of stable system headers
34  *   exposed by the Android NDK (Native Development Kit).
35  *
36  *   Third-party source AND binary code relies on the definitions
37  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
38  *
39  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
40  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
41  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
42  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
43  */
44 
45 #include <stdbool.h>
46 #include <stddef.h>
47 #include <stdint.h>
48 #include <sys/cdefs.h>
49 
50 #include "NeuralNetworksTypes.h"
51 
52 #ifdef __ANDROID__
53 #include <android/hardware_buffer.h>
#endif  // __ANDROID__
56 
57 #if !defined(__DEPRECATED_IN)
58 #define __DEPRECATED_IN(api_level, msg) __attribute__((annotate("deprecated_in=" #api_level)))
59 #endif
60 
// This is required for building libneuralnetworks_cl.
// The symbols have the same names as in the NDK, but
// they are not bound by API availability.
64 #ifdef NN_COMPATIBILITY_LIBRARY_BUILD
65 #define __NNAPI_INTRODUCED_IN(x)
66 #define __NNAPI_DEPRECATED_IN(x)
67 #else
68 #define __NNAPI_INTRODUCED_IN(x) __INTRODUCED_IN(x)
69 #define __NNAPI_DEPRECATED_IN(x) \
70     __DEPRECATED_IN(x, "NN API is deprecated. Users should migrate to TFlite.")
71 #endif
72 
73 #ifndef __NNAPI_FL5_MIN_ANDROID_API__
74 #define __NNAPI_FL5_MIN_ANDROID_API__ __ANDROID_API_S__
75 #endif
76 
77 __BEGIN_DECLS
78 
79 /**
80  * Create a {@link ANeuralNetworksMemoryDesc} with no properties.
81  *
82  * This only creates the memory descriptor. Its properties should be set with calls to
83  * {@link ANeuralNetworksMemoryDesc_addInputRole},
84  * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and
85  * {@link ANeuralNetworksMemoryDesc_setDimensions}.
86  *
87  * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set.
88  *
89  * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor
90  * is no longer needed.
91  *
92  * Available since NNAPI feature level 4.
93  *
94  * @param desc The {@link ANeuralNetworksMemoryDesc} to be created.
95  *             Set to NULL if unsuccessful.
96  *
97  * @return ANEURALNETWORKS_NO_ERROR if successful.
98  * @deprecated NNAPI is deprecated. See
99  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
100  *   for more details.
101  */
102 int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __NNAPI_INTRODUCED_IN(30)
103         __NNAPI_DEPRECATED_IN(35);
104 
105 /**
106  * Destroy a memory descriptor.
107  *
108  * The memory descriptor need not have been finished by a call to
109  * {@link ANeuralNetworksMemoryDesc_finish}.
110  *
111  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
112  *
113  * Available since NNAPI feature level 4.
114  *
115  * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and
116  *             results in no operation.
117  * @deprecated NNAPI is deprecated. See
118  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
119  *   for more details.
120  */
121 void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __NNAPI_INTRODUCED_IN(30)
122         __NNAPI_DEPRECATED_IN(35);
123 
124 /**
 * Specify that a memory object will play the role of an input to an execution created from a
 * particular compilation.
127  *
128  * The compilation and the input index fully specify an input operand. This function
129  * may be invoked multiple times on the same memory descriptor with different input operands,
130  * and the same input operand may be specified on multiple memory descriptors. However,
131  * specifying the same input operand on the same memory descriptor more than once will
132  * return an error.
133  *
134  * The dimensions of the corresponding model operands of all the roles specified by
135  * {@link ANeuralNetworksMemoryDesc_addInputRole} and
136  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
137  * dimensions are incompatible if both ranks are fully specified but have different values, or if
138  * there is at least one axis that is fully specified in both but has different values.
139  *
140  * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
141  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor
142  * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
143  *
144  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
145  * called will return an error.
146  *
147  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
148  *
149  * Available since NNAPI feature level 4.
150  *
151  * @param desc The memory descriptor to be modified.
152  * @param compilation The compilation object. It must already have been finished by calling
153  *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
154  *                    descriptor.
155  * @param index The index of the input argument we are referencing from the compilation. It is
156  *              an index into the inputs list passed to
157  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
158  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
159  * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
160  *                  memory is to be used in the specified role. This is provided as a hint to
161  *                  optimize the case when different roles prefer different memory locations or data
162  *                  layouts.
163  *
164  * @return ANEURALNETWORKS_NO_ERROR if successful.
165  * @deprecated NNAPI is deprecated. See
166  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
167  *   for more details.
168  */
169 int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc,
170                                            const ANeuralNetworksCompilation* compilation,
171                                            uint32_t index, float frequency)
172         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
173 
174 /**
 * Specify that a memory object will play the role of an output to an execution created from a
 * particular compilation.
177  *
178  * The compilation and the output index fully specify an output operand. This function
179  * may be invoked multiple times on the same memory descriptor with different output operands,
180  * and the same output operand may be specified on multiple memory descriptors. However,
181  * specifying the same output operand on the same memory descriptor object more than once will
182  * return an error.
183  *
184  * The dimensions of the corresponding model operands of all the roles specified by
185  * {@link ANeuralNetworksMemoryDesc_addInputRole} and
186  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
187  * dimensions are incompatible if both ranks are fully specified but have different values, or if
188  * there is at least one axis that is fully specified in both but has different values.
189  *
190  * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
191  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor
192  * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
193  *
194  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
195  * called will return an error.
196  *
197  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
198  *
199  * Available since NNAPI feature level 4.
200  *
201  * @param desc The memory descriptor to be modified.
202  * @param compilation The compilation object. It must already have been finished by calling
203  *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
204  *                    descriptor.
205  * @param index The index of the output argument we are referencing from the compilation. It is
206  *              an index into the outputs list passed to
207  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
208  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
209  * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
210  *                  memory is to be used in the specified role. This is provided as a hint to
211  *                  optimize the case when multiple roles prefer different memory locations or data
212  *                  layouts.
213  *
214  * @return ANEURALNETWORKS_NO_ERROR if successful.
215  * @deprecated NNAPI is deprecated. See
216  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
217  *   for more details.
218  */
219 int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc,
220                                             const ANeuralNetworksCompilation* compilation,
221                                             uint32_t index, float frequency)
222         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
223 
224 /**
225  * Set the dimensional information of the memory descriptor.
226  *
227  * The specified dimensions must be compatible with the dimensions of the corresponding model
228  * operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
229  * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks
230  * are fully specified but have different values, or if there is at least one axis that is fully
231  * specified in both but has different values.
232  *
233  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
234  * called will return an error.
235  *
236  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
237  *
238  * Available since NNAPI feature level 4.
239  *
240  * @param desc The memory descriptor to be modified.
241  * @param rank The number of dimensions. Must be 0 for scalars.
242  * @param dimensions An array of dimensions. An entry with the value 0 indicates that the
243  *                   corresponding axis has an unknown size.
244  *
245  * @return ANEURALNETWORKS_NO_ERROR if successful.
246  * @deprecated NNAPI is deprecated. See
247  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
248  *   for more details.
249  */
250 int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank,
251                                             const uint32_t* dimensions) __NNAPI_INTRODUCED_IN(30)
252         __NNAPI_DEPRECATED_IN(35);
253 
254 /**
255  * Indicate that we have finished modifying a memory descriptor. Required before calling
256  * {@link ANeuralNetworksMemory_createFromDesc}.
257  *
258  * This function must only be called once for a given memory descriptor.
259  *
260  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
261  *
262  * Available since NNAPI feature level 4.
263  *
264  * @param desc The memory descriptor to be finished.
265  *
266  * @return ANEURALNETWORKS_NO_ERROR if successful.
267  * @deprecated NNAPI is deprecated. See
268  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
269  *   for more details.
270  */
271 int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __NNAPI_INTRODUCED_IN(30)
272         __NNAPI_DEPRECATED_IN(35);
273 
274 /**
275  * Creates a memory object from a memory descriptor.
276  *
277  * The memory object is created with an uninitialized buffer. A memory object with an uninitialized
278  * buffer may only be used according to the roles specified by {@link
279  * ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link
280  * ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object
281  * is used as an output in a successful execution, or used as the destination memory in a successful
282  * {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used
283  * according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or
284  * destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will
285  * return to the uninitialized state if the memory object is used as an output in a failed
286  * execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}.
287  *
288  * The dimensions of the memory descriptor are deduced from the dimensions of the corresponding
289  * model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
290  * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to
291  * {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have
292  * unspecified dimensions or rank. In such a case, the same memory object may be used with different
293  * shapes of outputs in different executions. When the memory is used as an input, the input shape
294  * must be the same as the output shape from the last execution using this memory object as an
295  * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the
296  * destination memory. Creating a memory object with unspecified dimensions or rank may fail for
297  * certain sets of roles.
298  *
299  * Using the memory in roles or shapes that are not compatible with the rules specified above will
300  * return an error.
301  *
302  * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
303  * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
304  * both offset and length must be set to zero and the entire memory region will be
305  * associated with the specified input or output operand.
306  *
307  * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this
308  * function will return an error.
309  *
310  * {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed.
311  *
312  * Attempting to create memory from an unfinished memory descriptor will return an error.
313  *
314  * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory}
315  * object.
316  *
317  * Available since NNAPI feature level 4.
318  *
319  * @param desc The memory descriptor.
320  * @param memory The memory object to be created.
321  *               Set to NULL if unsuccessful.
322  *
323  * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is
324  *         created with unspecified dimensions or rank and it is not supported for this set of
325  *         roles.
326  * @deprecated NNAPI is deprecated. See
327  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
328  *   for more details.
329  */
330 int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc,
331                                          ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(30)
332         __NNAPI_DEPRECATED_IN(35);
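
/*
 * Example (illustrative sketch, not part of the stable API surface): minimal lifecycle of a
 * memory descriptor and of the device memory created from it. "compilation" is assumed to be
 * an already finished ANeuralNetworksCompilation whose output 0 (in one execution) and input 0
 * (in a later execution) will both be backed by this memory; error handling is reduced to a
 * single success check.
 *
 *   ANeuralNetworksMemoryDesc* desc = NULL;
 *   ANeuralNetworksMemory* memory = NULL;
 *   if (ANeuralNetworksMemoryDesc_create(&desc) == ANEURALNETWORKS_NO_ERROR &&
 *       ANeuralNetworksMemoryDesc_addInputRole(desc, compilation, 0, 1.0f) ==
 *               ANEURALNETWORKS_NO_ERROR &&
 *       ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation, 0, 1.0f) ==
 *               ANEURALNETWORKS_NO_ERROR &&
 *       ANeuralNetworksMemoryDesc_finish(desc) == ANEURALNETWORKS_NO_ERROR) {
 *       ANeuralNetworksMemory_createFromDesc(desc, &memory);
 *   }
 *   // The descriptor is no longer needed once the memory object has been created.
 *   ANeuralNetworksMemoryDesc_free(desc);
 */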
333 
334 /**
335  * Copies data from one memory object to another.
336  *
337  * If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc},
338  * the src and dst must have the same logical size:
339  * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created
340  *   from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of
341  *   AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.
342  * - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a
343  *   format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is
344  *   no padding and the data is tightly packed. This function may fail if the AHardwareBuffer
345  *   cannot be accessed.
346  * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size
347  *   equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This
348  *   function will fail if the number of elements is unknown.
349  *
350  * If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have
351  * compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but
352  * have different values, or if there is at least one axis that is fully specified in both but has
353  * different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions
354  * of dst will get updated according to the dimensions of the src.
355  *
356  * In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must
357  * have been used as an output in a successful execution, or used as the destination memory in a
358  * successful {@link ANeuralNetworksMemory_copy}.
359  *
360  * The src and dst may have different data layout, in which case the data copying is performed
361  * logically with data layout transformation.
362  *
363  * Available since NNAPI feature level 4.
364  *
365  * @param src The source memory object.
366  * @param dst The destination memory object.
367  *
368  * @return ANEURALNETWORKS_NO_ERROR if successful.
369  * @deprecated NNAPI is deprecated. See
370  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
371  *   for more details.
372  */
373 int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst)
374         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
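
/*
 * Example (illustrative sketch): reading back the contents of a device memory created from a
 * descriptor by copying it into an ordinary shared memory region. "deviceMemory" is assumed to
 * have been created by ANeuralNetworksMemory_createFromDesc and already written by a successful
 * execution; "stagingMemory" is assumed to be an fd-backed memory of the same logical size.
 *
 *   if (ANeuralNetworksMemory_copy(deviceMemory, stagingMemory) == ANEURALNETWORKS_NO_ERROR) {
 *       // The staging region now holds the output data and can be mapped and read by the app.
 *   }
 */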
375 
376 /**
377  * Get the number of available devices.
378  *
379  * @param numDevices Used to return the number of devices.
380  *
381  * @return ANEURALNETWORKS_NO_ERROR if successful.
382  *
383  * Available since NNAPI feature level 3.
384  * @deprecated NNAPI is deprecated. See
385  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
386  *   for more details.
387  */
388 int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __NNAPI_INTRODUCED_IN(29)
389         __NNAPI_DEPRECATED_IN(35);
390 
391 /**
392  * Get the representation of the specified device.
393  *
394  * @param devIndex The index of the specified device. Must be less than the
 *                  number of available devices.
396  * @param device The representation of the specified device.
397  *               The same representation will always be returned for the specified
398  *               device.
399  *
400  * @return ANEURALNETWORKS_NO_ERROR if successful.
401  *
402  * Available since NNAPI feature level 3.
403  * @deprecated NNAPI is deprecated. See
404  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
405  *   for more details.
406  */
407 int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device)
408         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
409 
410 /**
411  * Get the name of the specified device.
412  *
413  * @param device The representation of the specified device.
414  * @param name   The returned name of the specified device. The name will be in UTF-8
415  *               and will be null-terminated. It will be recognizable as a known device name
416  *               rather than a cryptic string. For devices with feature level reported by
417  *               {@link ANeuralNetworksDevice_getFeatureLevel} that is
418  *               {@link ANEURALNETWORKS_FEATURE_LEVEL_3} and higher, the format of the name is
419  *               {VENDOR}-{DEVICE}. For devices with feature level
420  *               {@link ANEURALNETWORKS_FEATURE_LEVEL_2} or lower, the format of the name is
421  *               undefined. The name will remain valid for the duration of the application.
422  *
423  * @return ANEURALNETWORKS_NO_ERROR if successful.
424  *
425  * Available since NNAPI feature level 3.
426  * @deprecated NNAPI is deprecated. See
427  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
428  *   for more details.
429  */
430 int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name)
431         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
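
/*
 * Example (illustrative sketch): enumerating the available devices and inspecting their names.
 * Device objects are owned by the runtime, so nothing needs to be freed here.
 *
 *   uint32_t numDevices = 0;
 *   if (ANeuralNetworks_getDeviceCount(&numDevices) == ANEURALNETWORKS_NO_ERROR) {
 *       for (uint32_t i = 0; i < numDevices; ++i) {
 *           ANeuralNetworksDevice* device = NULL;
 *           const char* name = NULL;
 *           if (ANeuralNetworks_getDevice(i, &device) == ANEURALNETWORKS_NO_ERROR &&
 *               ANeuralNetworksDevice_getName(device, &name) == ANEURALNETWORKS_NO_ERROR) {
 *               // "name" remains valid for the lifetime of the application.
 *           }
 *       }
 *   }
 */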
432 
433 /**
434  * Get the type of a given device.
435  *
 * The device type can be used to help application developers distribute machine learning
 * workloads and other workloads such as graphical rendering.
 * E.g., for an app which renders AR scenes based on real-time object detection results,
 * the developer could choose an ACCELERATOR type device for ML workloads, and reserve the
 * GPU for graphical rendering.
441  *
442  * @param device The representation of the specified device.
443  * @param type The returned {@link DeviceTypeCode} of the specified device.
444  *
445  * @return ANEURALNETWORKS_NO_ERROR if successful.
446  *
447  * Available since NNAPI feature level 3.
448  * @deprecated NNAPI is deprecated. See
449  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
450  *   for more details.
451  */
452 int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type)
453         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
454 
455 /**
456  * Get the version of the driver implementation of the specified device.
457  *
 * It’s the responsibility of the driver implementor to ensure that this version string
 * uniquely distinguishes this implementation from all previous implementations.
460  *
461  * This version string must not be confused with the feature level which is solely defined
462  * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.
463  * For example, it is not possible to filter all drivers older than a certain version.
464  *
465  * Application developers may use this version string to avoid or prefer specific driver
466  * implementations. For example, an application may want to do so because:
467  *     - A specific version of the driver does not provide the required performance,
468  *       perhaps because of a performance regression.
469  *     - A specific version of the driver has a bug or returns results that don’t match
470  *       the minimum precision requirement for the application.
471  *
472  * @param device The representation of the specified device.
473  * @param version The returned version string of the driver for the specified device. The
474  *                string will be in UTF-8 and will be null-terminated. For devices with feature
475  *                level 28 or lower, "UNKNOWN" will be returned. The version string will remain
476  *                valid for the duration of the application.
477  *
478  * @return ANEURALNETWORKS_NO_ERROR if successful.
479  *
480  * Available since NNAPI feature level 3.
481  * @deprecated NNAPI is deprecated. See
482  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
483  *   for more details.
484  */
485 int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version)
486         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
487 
488 /**
489  * Get the NNAPI feature level of the specified NNAPI device.
490  *
491  * Each device has a supported feature level, which is the most advanced NNAPI specification
492  * and features this driver implements. For example, if the driver implements the features
493  * introduced in {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, but does not implement the features
494  * introduced after {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, the value would be
495  * {@link ANEURALNETWORKS_FEATURE_LEVEL_2}. Developers could decide whether or not the specified
496  * device should be used for a model that has certain feature requirements.
497  *
498  * NNAPI device feature level is closely related to NNAPI runtime feature level
499  * ({@link ANeuralNetworks_getRuntimeFeatureLevel}), which indicates an NNAPI runtime feature
500  * level (the most advanced NNAPI specification and features that the runtime implements).
501  * An NNAPI device feature level is always less than or equal to the runtime feature level.
502  *
503  * This function produces a {@link FeatureLevelCode} enum value, NOT an Android API level.
504  *
505  * @param device The representation of the specified device.
506  * @param featureLevel {@link FeatureLevelCode} of the most advanced feature this driver implements.
507  *
508  * @return ANEURALNETWORKS_NO_ERROR if successful.
509  *
510  * Available since NNAPI feature level 3.
511  * @deprecated NNAPI is deprecated. See
512  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
513  *   for more details.
514  */
515 int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
516                                           int64_t* featureLevel) __NNAPI_INTRODUCED_IN(29)
517         __NNAPI_DEPRECATED_IN(35);
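
/*
 * Example (illustrative sketch): deciding whether a device is recent enough for a model that
 * needs features introduced in NNAPI feature level 4. "device" is assumed to have been obtained
 * from ANeuralNetworks_getDevice.
 *
 *   int64_t featureLevel = 0;
 *   bool usable = false;
 *   if (ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel) == ANEURALNETWORKS_NO_ERROR) {
 *       // Compare against a FeatureLevelCode value, not against an Android API level.
 *       usable = (featureLevel >= ANEURALNETWORKS_FEATURE_LEVEL_4);
 *   }
 */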
518 
519 /**
520  * Wait until the device is in a live state.
521  *
522  * A device may encounter internal errors and temporarily enter a dead state. A
523  * call that uses a device in such a state will return with the error
524  * {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until
525  * the device is in a live state.
526  *
527  * @param device The representation of the specified device.
528  *
529  * @return ANEURALNETWORKS_NO_ERROR if successful.
530  *
531  * Available since NNAPI feature level 4.
532  * @deprecated NNAPI is deprecated. See
533  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
534  *   for more details.
535  */
536 int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __NNAPI_INTRODUCED_IN(30)
537         __NNAPI_DEPRECATED_IN(35);
538 
539 /**
540  * Get the supported operations for a specified set of devices. If multiple devices
541  * are selected, the supported operation list is a union of supported operations of all
542  * selected devices.
543  *
544  * @param model The model to be queried.
545  * @param devices The set of devices. Must not contain duplicates.
546  * @param numDevices The number of devices in the set.
547  * @param supportedOps The boolean array to be filled. True means supported. The size of the
548  *                     boolean array must be at least as large as the number of operations
549  *                     in the model. The order of elements in the supportedOps array matches
550  *                     the order in which the corresponding operations were added to the model.
551  *
552  * @return ANEURALNETWORKS_NO_ERROR if successful.
553  *
554  * Available since NNAPI feature level 3.
555  * @deprecated NNAPI is deprecated. See
556  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
557  *   for more details.
558  */
559 int ANeuralNetworksModel_getSupportedOperationsForDevices(
560         const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
561         uint32_t numDevices, bool* supportedOps) __NNAPI_INTRODUCED_IN(29)
562         __NNAPI_DEPRECATED_IN(35);
563 
564 /**
565  * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set
566  * of devices. If more than one device is specified, the compilation will
567  * distribute the workload automatically across the devices. The model must be fully
568  * supported by the specified set of devices. This means that
569  * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every
570  * operation for that model/devices pair.
571  *
572  * The user must handle all compilation and execution failures from the
573  * specified set of devices. This is in contrast to a use of {@link
574  * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover
575  * from such failures.
576  *
577  * The model passed to this function is termed the "main model" of the
578  * compilation, to distinguish it from other models referred to by an Operand
579  * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
580  *
581  * @param model The {@link ANeuralNetworksModel} to be compiled.
582  * @param devices The set of devices. Must not contain duplicates.
583  * @param numDevices The number of devices in the set.
584  * @param compilation The newly created object or NULL if unsuccessful.
585  *
586  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
587  *         if the model is invalid.
588  *
589  * Available since NNAPI feature level 3.
590  * @deprecated NNAPI is deprecated. See
591  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
592  *   for more details.
593  */
594 int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model,
595                                                 const ANeuralNetworksDevice* const* devices,
596                                                 uint32_t numDevices,
597                                                 ANeuralNetworksCompilation** compilation)
598         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
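
/*
 * Example (illustrative sketch): compiling a finished model for a single explicitly chosen
 * device, but only if that device supports every operation in the model. "model" is assumed to
 * be finished, "device" to come from ANeuralNetworks_getDevice, and kOperationCount is a
 * hypothetical constant equal to the number of operations the application added to the model.
 *
 *   const ANeuralNetworksDevice* devices[1] = {device};
 *   bool supported[kOperationCount];
 *   ANeuralNetworksCompilation* compilation = NULL;
 *   if (ANeuralNetworksModel_getSupportedOperationsForDevices(model, devices, 1, supported) ==
 *               ANEURALNETWORKS_NO_ERROR) {
 *       bool allSupported = true;
 *       for (uint32_t i = 0; i < kOperationCount; ++i) allSupported = allSupported && supported[i];
 *       if (allSupported &&
 *           ANeuralNetworksCompilation_createForDevices(model, devices, 1, &compilation) ==
 *                   ANEURALNETWORKS_NO_ERROR) {
 *           ANeuralNetworksCompilation_finish(compilation);
 *       }
 *   }
 */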
599 
600 /**
601  * Sets the compilation caching signature and the cache directory.
602  *
603  * Provides optional caching information to the runtime for faster repeated
604  * compilation.
605  *
606  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
607  *
608  * @param compilation The compilation to be modified.
609  * @param cacheDir The cache directory for the runtime to store and retrieve caching
610  *                 data. It is recommended to use the code cache directory provided
611  *                 by the Android runtime. If not using the code cache directory, the
612  *                 user should choose a directory local to the application, and is
613  *                 responsible for managing the cache entries.
 * @param token The token provided by the user to identify a model. It must be of length
 *              ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that
616  *              the token is unique to a model within the application. The NNAPI
617  *              runtime cannot detect token collisions; a collision will result in a
618  *              failed execution or in a successful execution that produces incorrect
619  *              output values.
620  *
621  * @return ANEURALNETWORKS_NO_ERROR if successful.
622  *
623  * Available since NNAPI feature level 3.
624  * @deprecated NNAPI is deprecated. See
625  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
626  *   for more details.
627  */
628 int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation,
629                                           const char* cacheDir, const uint8_t* token)
630         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
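
/*
 * Example (illustrative sketch): enabling compilation caching before finishing a compilation.
 * "compilation" is assumed to be an unfinished compilation, and "cacheDir" a path to the app's
 * code cache directory obtained from the Java side; both names are assumptions of this sketch.
 *
 *   uint8_t token[ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN];
 *   // Fill "token" with a stable, application-unique fingerprint of the model (app-defined).
 *   if (ANeuralNetworksCompilation_setCaching(compilation, cacheDir, token) ==
 *               ANEURALNETWORKS_NO_ERROR) {
 *       ANeuralNetworksCompilation_finish(compilation);
 *   }
 */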
631 
632 /**
633  * Schedule synchronous evaluation of the execution.
634  *
635  * <p>Schedules synchronous evaluation of the execution. Returns once the
636  * execution has completed and the outputs are ready to be consumed.
637  * </p>
638  *
639  * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
640  * and the execution is not able to complete before the timeout duration is
641  * exceeded, then execution may be aborted, in which case
 * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned. If the device has
 * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that is lower
 * than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will be ignored.
645  *
646  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
647  * the condition model does not output false within the loop timeout duration,
648  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
649  * will be returned.
650  *
651  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
652  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
653  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
654  * the execution is in the completed state.
655  *
656  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
657  *
658  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
659  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
660  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
661  * asynchronous execution with dependencies.
662  *
663  * Available since NNAPI feature level 3.
664  *
665  * @param execution The execution to be scheduled and executed.
666  *
667  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
668  *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
669  *         be properly mapped.
670  * @deprecated NNAPI is deprecated. See
671  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
672  *   for more details.
673  */
674 int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __NNAPI_INTRODUCED_IN(29)
675         __NNAPI_DEPRECATED_IN(35);
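
/*
 * Example (illustrative sketch): one synchronous inference. ANeuralNetworksExecution_create,
 * _setInput, _setOutput, and _free are declared elsewhere in this header. "compilation" is
 * assumed to be finished, and kInputSize/kOutputSize are hypothetical element counts for a
 * model with a single float32 input and a single float32 output.
 *
 *   float input[kInputSize];   // filled by the application
 *   float output[kOutputSize];
 *   ANeuralNetworksExecution* execution = NULL;
 *   if (ANeuralNetworksExecution_create(compilation, &execution) == ANEURALNETWORKS_NO_ERROR &&
 *       ANeuralNetworksExecution_setInput(execution, 0, NULL, input, sizeof(input)) ==
 *               ANEURALNETWORKS_NO_ERROR &&
 *       ANeuralNetworksExecution_setOutput(execution, 0, NULL, output, sizeof(output)) ==
 *               ANEURALNETWORKS_NO_ERROR &&
 *       ANeuralNetworksExecution_compute(execution) == ANEURALNETWORKS_NO_ERROR) {
 *       // "output" now holds the result.
 *   }
 *   ANeuralNetworksExecution_free(execution);
 */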
676 
677 /**
 * Get the rank (number of dimensions) of the specified output operand of the model of the
 * latest computation evaluated on {@link ANeuralNetworksExecution}.
680  *
681  * This function may only be invoked when the execution is in the completed state.
682  *
683  * See {@link ANeuralNetworksExecution} for information on execution states.
684  *
685  * @param execution The execution to be queried.
686  * @param index The index of the output argument we are querying. It is
687  *              an index into the lists passed to
688  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
689  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
690  * @param rank The rank of the output operand.
691  *
692  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
693  *         if the target output is provided an insufficient buffer at execution time,
694  *         ANEURALNETWORKS_BAD_DATA if the index is invalid.
695  *
696  * Available since NNAPI feature level 3.
697  * @deprecated NNAPI is deprecated. See
698  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
699  *   for more details.
700  */
701 int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution,
702                                                   int32_t index, uint32_t* rank)
703         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
704 
705 /**
706  * Get the dimensional information of the specified output operand of the model of the
707  * latest computation evaluated on {@link ANeuralNetworksExecution}. The target output operand
708  * cannot be a scalar.
709  *
710  * This function may only be invoked when the execution is in the completed state.
711  *
712  * See {@link ANeuralNetworksExecution} for information on execution states.
713  *
714  * @param execution The execution to be queried.
715  * @param index The index of the output argument we are querying. It is an index into the lists
716  *              passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
717  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
718  * @param dimensions The dimension array to be filled. The size of the array must be exactly as
719  *                   large as the rank of the output operand to be queried in the model.
720  *
721  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
722  *         if the target output is provided an insufficient buffer at execution time,
723  *         ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar.
724  *
725  * Available since NNAPI feature level 3.
726  * @deprecated NNAPI is deprecated. See
727  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
728  *   for more details.
729  */
730 int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution,
731                                                         int32_t index, uint32_t* dimensions)
732         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
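
/*
 * Example (illustrative sketch): querying the actual shape of output 0 after a completed
 * execution, e.g. when the model left the output dimensions partially unspecified. The fixed
 * upper bound of 4 dimensions is an assumption of this sketch.
 *
 *   uint32_t rank = 0;
 *   uint32_t dims[4] = {0, 0, 0, 0};
 *   if (ANeuralNetworksExecution_getOutputOperandRank(execution, 0, &rank) ==
 *               ANEURALNETWORKS_NO_ERROR &&
 *       rank > 0 && rank <= 4) {
 *       ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, dims);
 *   }
 */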
733 
734 /**
735  * Create a {@link ANeuralNetworksBurst} to apply the given compilation.
736  * This only creates the burst object. Computation is only performed once
737  * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid
738  * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}.
739  *
740  * <p>The provided compilation must outlive the burst object.</p>
741  *
742  * Available since NNAPI feature level 3.
743  *
744  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
745  * @param burst The newly created object or NULL if unsuccessful.
746  *
747  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
748  *         if the compilation is invalid.
749  * @deprecated NNAPI is deprecated. See
750  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
751  *   for more details.
752  */
753 int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation,
754                                 ANeuralNetworksBurst** burst) __NNAPI_INTRODUCED_IN(29)
755         __NNAPI_DEPRECATED_IN(35);
756 
757 /**
758  * Destroys the burst object.
759  *
760  * Available since NNAPI feature level 3.
761  *
762  * @param burst The burst object to be destroyed. Passing NULL is acceptable and
763  *              results in no operation.
764  * @deprecated NNAPI is deprecated. See
765  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
766  *   for more details.
767  */
768 void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __NNAPI_INTRODUCED_IN(29)
769         __NNAPI_DEPRECATED_IN(35);
770 
771 /**
772  * Schedule synchronous evaluation of the execution on a burst object.
773  *
774  * <p>Schedules synchronous evaluation of the execution. Returns once the
775  * execution has completed and the outputs are ready to be consumed.</p>
776  *
777  * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution,
778  * and the execution is not able to complete before the timeout duration is
779  * exceeded, then execution may be aborted, in which case
780  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned.
781  *
782  * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
783  * the condition model does not output false within the loop timeout duration,
784  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
785  * will be returned. If the device has a feature level reported by
786  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
787  * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will be ignored.
788  *
789  * <p>There must be at most one {@link ANeuralNetworksExecution} processing at
790  * any given time for any given burst object. Any
791  * {@link ANeuralNetworksExecution} launched before the previous has finished
792  * will result in ANEURALNETWORKS_BAD_STATE.</p>
793  *
794  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
795  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
796  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
797  * the execution is in the completed state.
798  *
799  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
800  *
801  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
802  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
803  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
804  * asynchronous execution with dependencies.
805  *
806  * Available since NNAPI feature level 3.
807  *
808  * @param burst The burst object to execute on.
809  * @param execution The execution to be scheduled and executed. The execution
810  *                  must be created from the same {@link
811  *                  ANeuralNetworksCompilation} as the burst object.
812  *
813  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
814  * @deprecated NNAPI is deprecated. See
815  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
816  *   for more details.
817  */
818 int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution,
819                                           ANeuralNetworksBurst* burst) __NNAPI_INTRODUCED_IN(29)
820         __NNAPI_DEPRECATED_IN(35);
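
/*
 * Example (illustrative sketch): running a sequence of inferences on the same compilation
 * through one burst object, as one might do for consecutive camera frames. "compilation" is
 * assumed to be finished and kNumFrames is a hypothetical frame count; setting inputs and
 * outputs is elided.
 *
 *   ANeuralNetworksBurst* burst = NULL;
 *   if (ANeuralNetworksBurst_create(compilation, &burst) == ANEURALNETWORKS_NO_ERROR) {
 *       for (int frame = 0; frame < kNumFrames; ++frame) {
 *           ANeuralNetworksExecution* execution = NULL;
 *           if (ANeuralNetworksExecution_create(compilation, &execution) !=
 *                   ANEURALNETWORKS_NO_ERROR) break;
 *           // ... set inputs and outputs for this frame ...
 *           ANeuralNetworksExecution_burstCompute(execution, burst);
 *           ANeuralNetworksExecution_free(execution);
 *       }
 *   }
 *   ANeuralNetworksBurst_free(burst);
 */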
821 
822 /**
823  * Creates a shared memory object from an AHardwareBuffer handle.
824  *
825  * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB
826  * format, it can be used the same way as shared memory created from a file handle. See
827  * {@link ANeuralNetworksMemory} for a description on how to use this shared memory.
828  *
829  * If the shared memory is backed by an AHardwareBuffer of a format other than
830  * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for model inputs and outputs.
831  * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
832  * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both
833  * offset and length must be set to zero and the entire memory region will be
834  * associated with the specified input or output operand. There is no guarantee
835  * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination
836  * can be used by arbitrary devices. The execution will fail if the selected set of
837  * devices cannot consume the buffer.
838  *
839  * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory
840  * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is
841  * disallowed.
842  *
843  * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object.
844  *
845  * Available since NNAPI feature level 3.
846  *
847  * @param ahwb The AHardwareBuffer handle.
848  * @param memory The memory object to be created.
849  *               Set to NULL if unsuccessful.
850  *
851  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
852  *
853  * @see AHardwareBuffer
854  * @deprecated NNAPI is deprecated. See
855  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
856  *   for more details.
857  */
858 #ifdef __ANDROID__
859 int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb,
860                                                     ANeuralNetworksMemory** memory)
861         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
862 #endif  // __ANDROID__
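
/*
 * Example (illustrative sketch): wrapping a BLOB-format AHardwareBuffer as an NNAPI memory
 * object. kBufferBytes is a hypothetical size constant; AHardwareBuffer_allocate and
 * AHardwareBuffer_release come from <android/hardware_buffer.h>.
 *
 *   AHardwareBuffer_Desc ahwbDesc = {
 *           .width = kBufferBytes,
 *           .height = 1,
 *           .layers = 1,
 *           .format = AHARDWAREBUFFER_FORMAT_BLOB,
 *           .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
 *   };
 *   AHardwareBuffer* ahwb = NULL;
 *   ANeuralNetworksMemory* memory = NULL;
 *   if (AHardwareBuffer_allocate(&ahwbDesc, &ahwb) == 0 &&
 *       ANeuralNetworksMemory_createFromAHardwareBuffer(ahwb, &memory) ==
 *               ANEURALNETWORKS_NO_ERROR) {
 *       // The AHardwareBuffer must stay alive for as long as "memory" is in use.
 *   }
 */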
863 
864 /**
865 
866  * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be
867  * measured. Evaluation of the execution must not have been scheduled.
868  *
869  * By default, duration is not measured.
870  *
871  * The {@link ANeuralNetworksExecution} must have been created from an
872  * {@link ANeuralNetworksCompilation} which in turn was created from
873  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.
874  * If the device has a feature level reported by
875  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
876  * {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, then the duration will not be measured.
877  *
878  * This function may only be invoked when the execution is in the preparation state.
879  *
880  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
881  *
882  * Available since NNAPI feature level 3.
883  *
884  * @param execution The execution to be modified.
885  * @param measure 'true' if duration is to be measured, 'false' if not.
886  *
887  * @return ANEURALNETWORKS_NO_ERROR if successful.
888  * @deprecated NNAPI is deprecated. See
889  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
890  *   for more details.
891  */
892 int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure)
893         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
894 
895 /**
896  * Get the time spent in the latest computation evaluated on the specified
897  * {@link ANeuralNetworksExecution}, in nanoseconds.
898  *
899  * This function may only be invoked when the execution is in the completed state.
900  *
901  * See {@link ANeuralNetworksExecution} for information on execution states.
902  *
903  * @param execution The execution to be queried.
904  * @param durationCode The measurement to be queried, specified by {@link DurationCode}.
 * @param duration The returned duration. If no measurement was requested by
 *                 {@link ANeuralNetworksExecution_setMeasureTiming}, if the
 *                 device has a feature level reported by
 *                 {@link ANeuralNetworksDevice_getFeatureLevel} that is lower
 *                 than {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, or for some other
 *                 reason the duration is not available, UINT64_MAX will be returned.
911  *                 A particular device need not support any given measurement.
912  *
913  * @return ANEURALNETWORKS_NO_ERROR if successful.
914  *
915  * Available since NNAPI feature level 3.
916  * @deprecated NNAPI is deprecated. See
917  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
918  *   for more details.
919  */
920 int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution,
921                                          int32_t durationCode, uint64_t* duration)
922         __NNAPI_INTRODUCED_IN(29) __NNAPI_DEPRECATED_IN(35);
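
/*
 * Example (illustrative sketch): measuring how long the last computation spent on the
 * accelerator. Measurement must be requested before the execution is scheduled, and the
 * execution is assumed to target a single device via ANeuralNetworksCompilation_createForDevices.
 *
 *   ANeuralNetworksExecution_setMeasureTiming(execution, true);
 *   // ... schedule and complete the execution, e.g. with ANeuralNetworksExecution_compute ...
 *   uint64_t onHardwareNs = 0;
 *   ANeuralNetworksExecution_getDuration(execution, ANEURALNETWORKS_DURATION_ON_HARDWARE,
 *                                        &onHardwareNs);
 *   if (onHardwareNs != UINT64_MAX) {
 *       // "onHardwareNs" is the time spent on the device, in nanoseconds.
 *   }
 */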
923 
924 /**
925  * Creates a shared memory object from a file descriptor.
926  *
927  * The shared memory is backed by a file descriptor via mmap.
928  * See {@link ANeuralNetworksMemory} for a description on how to use
929  * this shared memory.
930  *
931  * Available since NNAPI feature level 1.
932  *
933  * @param size The requested size in bytes.
934  *             Must not be larger than the file size.
935  * @param protect The desired memory protection for the mapping.
936  *             It is either PROT_NONE or the bitwise OR of one or
937  *             more of the following flags: PROT_READ, PROT_WRITE.
938  * @param fd The requested file descriptor.
939  *           The file descriptor has to be mmap-able. The file
940  *           descriptor will be duplicated.
941  * @param offset The offset to the beginning of the file of the area to map.
942  *               The offset has to be aligned to a page size.
943  * @param memory The memory object to be created.
944  *               Set to NULL if unsuccessful.
945  *
946  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
947  * @deprecated NNAPI is deprecated. See
948  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
949  *   for more details.
950  */
951 int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
952                                        ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(27)
953         __NNAPI_DEPRECATED_IN(35);
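
/*
 * Example (illustrative sketch): mapping a read-only file of constant data (for example model
 * weights) as an NNAPI memory object. The path is hypothetical; <fcntl.h>, <sys/stat.h>,
 * <sys/mman.h>, and <unistd.h> provide open/fstat/PROT_READ/close.
 *
 *   int fd = open("/path/to/weights.bin", O_RDONLY);
 *   struct stat st;
 *   ANeuralNetworksMemory* memory = NULL;
 *   if (fd >= 0 && fstat(fd, &st) == 0) {
 *       ANeuralNetworksMemory_createFromFd((size_t) st.st_size, PROT_READ, fd, 0, &memory);
 *   }
 *   // The runtime duplicates the descriptor, so the original can be closed right away.
 *   if (fd >= 0) close(fd);
 */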
954 
955 /**
956  * Delete a memory object.
957  *
958  * Destroys the object used by the run time to keep track of the memory.
959  * This will free the underlying actual memory if no other code has open
960  * handles to this memory.
961  *
962  * Available since NNAPI feature level 1.
963  *
964  * @param memory The memory object to be freed. Passing NULL is acceptable and
965  *               results in no operation.
966  * @deprecated NNAPI is deprecated. See
967  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
968  *   for more details.
969  */
970 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __NNAPI_INTRODUCED_IN(27)
971         __NNAPI_DEPRECATED_IN(35);
972 
973 /**
974  * Create an empty {@link ANeuralNetworksModel}.
975  *
976  * <p>This only creates the object. Computation is performed once
977  * {@link ANeuralNetworksExecution_burstCompute},
978  * {@link ANeuralNetworksExecution_compute},
979  * {@link ANeuralNetworksExecution_startCompute} or
980  * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
981  *
 * The model should be constructed with calls to
 * {@link ANeuralNetworksModel_addOperation} and
 * {@link ANeuralNetworksModel_addOperand}.
985  *
986  * <p>{@link ANeuralNetworksModel_finish} should be called once the model
987  * has been fully constructed.</p>
988  *
989  * <p>{@link ANeuralNetworksModel_free} should be called once the model
990  * is no longer needed.</p>
991  *
992  * Available since NNAPI feature level 1.
993  *
994  * @param model The {@link ANeuralNetworksModel} to be created.
995  *              Set to NULL if unsuccessful.
996  *
997  * @return ANEURALNETWORKS_NO_ERROR if successful.
998  * @deprecated NNAPI is deprecated. See
999  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1000  *   for more details.
1001  */
1002 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __NNAPI_INTRODUCED_IN(27)
1003         __NNAPI_DEPRECATED_IN(35);
1004 
1005 /**
1006  * Destroy a model.
1007  *
1008  * The model need not have been finished by a call to
1009  * {@link ANeuralNetworksModel_finish}.
1010  *
1011  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1012  *
1013  * Available since NNAPI feature level 1.
1014  *
1015  * @param model The model to be destroyed. Passing NULL is acceptable and
1016  *              results in no operation.
1017  * @deprecated NNAPI is deprecated. See
1018  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1019  *   for more details.
1020  */
1021 void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __NNAPI_INTRODUCED_IN(27)
1022         __NNAPI_DEPRECATED_IN(35);
1023 
1024 /**
1025  * Indicate that we have finished modifying a model. Required before
1026  * calling {@link ANeuralNetworksCompilation_create} and
1027  * {@link ANeuralNetworksCompilation_createForDevices}.
1028  *
1029  * An application must ensure that no other thread uses the model at the same
1030  * time.
1031  *
1032  * This function must only be called once for a given model.
1033  *
1034  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1035  *
1036  * Available since NNAPI feature level 1.
1037  *
1038  * @param model The model to be finished.
1039  *
1040  * @return ANEURALNETWORKS_NO_ERROR if successful.
1041  * @deprecated NNAPI is deprecated. See
1042  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1043  *   for more details.
1044  */
1045 int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __NNAPI_INTRODUCED_IN(27)
1046         __NNAPI_DEPRECATED_IN(35);
1047 
1048 /**
1049  * Add an operand to a model.
1050  *
1051  * The order in which the operands are added is important. The first one added
1052  * to a model will have the index value 0, the second 1, etc. These indexes are
1053  * used as operand identifiers in
1054  * {@link ANeuralNetworksModel_addOperation},
1055  * {@link ANeuralNetworksModel_identifyInputsAndOutputs},
1056  * {@link ANeuralNetworksModel_setOperandValue},
1057  * {@link ANeuralNetworksModel_setOperandValueFromMemory},
1058  * {@link ANeuralNetworksExecution_setInput},
1059  * {@link ANeuralNetworksExecution_setInputFromMemory},
1060  * {@link ANeuralNetworksExecution_setOutput}, and
1061  * {@link ANeuralNetworksExecution_setOutputFromMemory}.
1062  *
1063  * <p>Every operand must be referenced in exactly one of the following
1064  * ways:<ul>
1065  *    <li>It is identified as a model input with
1066  *        {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
1067  *    <li>It is identified as a constant with
1068  *        {@link ANeuralNetworksModel_setOperandValue} or
1069  *        {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
1070  *    <li>It is identified as an output of exactly one operation with
1071  *        {@link ANeuralNetworksModel_addOperation}.</li>
1072  *    </ul></p>
1073  * <p>An operand that is identified as a model input or as a constant
1074  * must not also be identified as a model output with
1075  * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>
1076  *
1077  * To build a model that can accommodate inputs of various sizes, as
1078  * you may want to do for a CNN, leave unspecified the dimensions that
1079  * will vary at run time.  If you do so, fully specify dimensions
1080  * when calling {@link ANeuralNetworksExecution_setInput} or
1081  * {@link ANeuralNetworksExecution_setInputFromMemory}.
1082  *
1083  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1084  * called will return an error.
1085  *
1086  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1087  *
1088  * Available since NNAPI feature level 1.
1089  *
1090  * @param model The model to be modified.
1091  * @param type The {@link ANeuralNetworksOperandType} that describes the shape
1092  *             of the operand.  Neither the {@link ANeuralNetworksOperandType}
1093  *             nor the dimensions it points to need to outlive the call to
1094  *             {@link ANeuralNetworksModel_addOperand}.
1095  *
1096  * @return ANEURALNETWORKS_NO_ERROR if successful.
1097  * @deprecated NNAPI is deprecated. See
1098  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1099  *   for more details.
1100  */
1101 int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
1102                                     const ANeuralNetworksOperandType* type)
1103         __NNAPI_INTRODUCED_IN(27) __NNAPI_DEPRECATED_IN(35);
1104 
1105 /**
1106  * Sets an operand to a constant value.
1107  *
 * Values of length smaller than or equal to
 * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES
 * are immediately copied into the model.
1111  *
1112  * For values of length greater than
1113  * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, a pointer to
1114  * the buffer is stored within the model. The application must not change the
1115  * content of this region until all executions using this model have
1116  * completed. As the data may be copied during processing, modifying the data
1117  * after this call yields undefined results. The provided buffer must outlive
1118  * this model.
1119  *
1120  * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
1121  * is likely to be more efficient.
1122  *
1123  * To indicate that an optional operand should be considered missing,
1124  * pass nullptr for buffer and 0 for length.
1125  *
1126  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1127  * called will return an error.
1128  *
1129  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1130  *
1131  * Available since NNAPI feature level 1.
1132  *
1133  * @param model The model to be modified.
1134  * @param index The index of the model operand we're setting.
1135  * @param buffer A pointer to the data to use.
1136  * @param length The size in bytes of the data value.
1137  *
1138  * @return ANEURALNETWORKS_NO_ERROR if successful.
1139  * @deprecated NNAPI is deprecated. See
1140  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1141  *   for more details.
1142  */
1143 int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
1144                                          const void* buffer, size_t length)
1145         __NNAPI_INTRODUCED_IN(27) __NNAPI_DEPRECATED_IN(35);
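
/*
 * Usage sketch (continues the illustrative ADD model above; not normative).
 * Both values here fit within ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES
 * and are therefore copied into the model; a constant larger than that would only
 * be referenced and would have to outlive the model.
 *
 *   static const int32_t kActivation = ANEURALNETWORKS_FUSED_NONE;
 *   ANeuralNetworksModel_setOperandValue(model, 2, &kActivation, sizeof(kActivation));
 *
 *   static const float kAddend[2 * 2] = {1.0f, 2.0f, 3.0f, 4.0f};
 *   ANeuralNetworksModel_setOperandValue(model, 1, kAddend, sizeof(kAddend));
 */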
1146 
1147 /**
1148  * Sets an operand's per channel quantization parameters.
1149  *
1150  * Sets parameters required by a tensor of type
1151  * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.
1152  * This function must be called for every tensor of type
1153  * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before
1154  * calling {@link ANeuralNetworksModel_finish}.
1155  *
1156  * Available since NNAPI feature level 3.
1157  *
1158  * @param model The model to be modified.
1159  * @param index The index of the model operand we're setting.
1160  * @param channelQuant The per channel quantization parameters for the operand.
1161  *                    No memory in this struct needs to outlive the call to
1162  *                    this function.
1163  *
1164  * @return ANEURALNETWORKS_NO_ERROR if successful.
1165  * @deprecated NNAPI is deprecated. See
1166  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1167  *   for more details.
1168  */
1169 int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
1170         ANeuralNetworksModel* model, int32_t index,
1171         const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __NNAPI_INTRODUCED_IN(29)
1172         __NNAPI_DEPRECATED_IN(35);
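
/*
 * Usage sketch (standalone, illustrative): supplying per-channel scales for a
 * hypothetical convolution filter operand of type
 * ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL. The operand index "filterIndex"
 * and the channel count of 3 are assumptions for this example.
 *
 *   static const float kScales[3] = {0.05f, 0.075f, 0.1f};
 *   ANeuralNetworksSymmPerChannelQuantParams channelQuant = {
 *           .channelDim = 0,   // quantized along the first dimension
 *           .scaleCount = 3,
 *           .scales = kScales,
 *   };
 *   ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(model, filterIndex, &channelQuant);
 */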
1173 
1174 /**
1175  * Sets an operand to a value stored in a memory object.
1176  *
1177  * The content of the memory is not copied. A reference to that memory is stored
1178  * inside the model. The application must not change the content of the memory
1179  * region until all executions using this model have completed.  As the data may
1180  * be copied during processing, modifying the data after this call yields
1181  * undefined results.
1182  *
1183  * <p>The provided memory must outlive this model.</p>
1184  *
1185  * To indicate that an optional operand should be considered missing,
1186  * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
1187  *
1188  * It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer
1189  * of a format other than AHARDWAREBUFFER_FORMAT_BLOB.
1190  *
1191  * It is disallowed to set an operand value with memory created from
1192  * {@link ANeuralNetworksMemory_createFromDesc}.
1193  *
1194  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1195  * called will return an error.
1196  *
1197  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1198  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1199  * AHardwareBuffer usage.
1200  *
1201  * Available since NNAPI feature level 1.
1202  *
1203  * @param model The model to be modified.
1204  * @param index The index of the model operand we're setting.
1205  * @param memory The memory containing the data.
1206  * @param offset This specifies the location of the data within the memory.
1207  *               The offset is in bytes from the start of memory.
1208  * @param length The size in bytes of the data value.
1209  *
1210  * @return ANEURALNETWORKS_NO_ERROR if successful.
1211  * @deprecated NNAPI is deprecated. See
1212  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1213  *   for more details.
1214  */
1215 int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index,
1216                                                    const ANeuralNetworksMemory* memory,
1217                                                    size_t offset, size_t length)
1218         __NNAPI_INTRODUCED_IN(27) __NNAPI_DEPRECATED_IN(35);
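
/*
 * Usage sketch (standalone, illustrative): backing a large constant operand with
 * a memory object created from a file descriptor. "fd", "fileSize", and
 * "constantIndex" are assumed to come from the surrounding application code;
 * PROT_READ requires <sys/mman.h>.
 *
 *   ANeuralNetworksMemory* memory = NULL;
 *   if (ANeuralNetworksMemory_createFromFd(fileSize, PROT_READ, fd, 0, &memory) ==
 *               ANEURALNETWORKS_NO_ERROR) {
 *       ANeuralNetworksModel_setOperandValueFromMemory(model, constantIndex, memory,
 *                                                      0, fileSize);
 *   }
 *   // The memory object must outlive the model; call ANeuralNetworksMemory_free
 *   // only after the model is no longer in use.
 */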
1219 
1220 /**
1221  * Sets an operand to a value that is a reference to another NNAPI model.
1222  *
1223  * The referenced model must already have been finished by a call to
1224  * {@link ANeuralNetworksModel_finish}.
1225  *
1226  * The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of
1227  * referenced models is overridden by that setting of the main model of a
1228  * compilation.
1229  *
1230  * The referenced model must outlive the model referring to it.
1231  *
1232  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
1233  * been called will return an error.
1234  *
1235  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1236  *
1237  * Available since NNAPI feature level 4.
1238  *
1239  * @param model The model to be modified.
1240  * @param index The index of the model operand we're setting.
1241  * @param value The model to be referenced.
1242  *
1243  * @return ANEURALNETWORKS_NO_ERROR if successful.
1244  * @deprecated NNAPI is deprecated. See
1245  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1246  *   for more details.
1247  */
1248 int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index,
1249                                                   const ANeuralNetworksModel* value)
1250         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
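
/*
 * Usage sketch (standalone, illustrative): referencing an already finished model
 * from an operand of type ANEURALNETWORKS_MODEL, as used by control flow
 * operations such as ANEURALNETWORKS_WHILE. "bodyModel" and "bodyOperandIndex"
 * are assumptions for this example.
 *
 *   // bodyModel must already have been finished with ANeuralNetworksModel_finish
 *   // and must outlive the referring model.
 *   ANeuralNetworksModel_setOperandValueFromModel(model, bodyOperandIndex, bodyModel);
 */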
1251 
1252 /**
 * Add an operation to a model.
 *
 * The operands specified by inputs and outputs must have been
 * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
 *
 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
 * called will return an error.
 *
 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
 *
 * Available since NNAPI feature level 1.
 *
 * @param model The model to be modified.
 * @param type The {@link ANeuralNetworksOperationType} of the operation.
 * @param inputCount The number of entries in the inputs array.
 * @param inputs An array of indexes identifying each input operand.
 * @param outputCount The number of entries in the outputs array.
 * @param outputs An array of indexes identifying each output operand.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
1273  * @deprecated NNAPI is deprecated. See
1274  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1275  *   for more details.
1276  */
1277 int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
1278                                       ANeuralNetworksOperationType type, uint32_t inputCount,
1279                                       const uint32_t* inputs, uint32_t outputCount,
1280                                       const uint32_t* outputs) __NNAPI_INTRODUCED_IN(27)
1281         __NNAPI_DEPRECATED_IN(35);
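
/*
 * Usage sketch (continues the illustrative ADD model above; not normative):
 * operand 3 becomes the output of a single ANEURALNETWORKS_ADD operation.
 *
 *   const uint32_t addInputs[3] = {0, 1, 2};   // input tensor, constant addend, activation
 *   const uint32_t addOutputs[1] = {3};
 *   ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
 *                                     3, addInputs, 1, addOutputs);
 */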
1282 
1283 /**
 * Specifies which operands will be the model's inputs and
 * outputs. Every model must have at least one input and one output.
 *
 * An operand cannot be used for both input and output. Doing so will
 * return an error.
 *
 * The operands specified by inputs and outputs must have been
 * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
 *
 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
 * called will return an error.
 *
 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
 *
 * Available since NNAPI feature level 1.
 *
 * @param model The model to be modified.
 * @param inputCount The number of entries in the inputs array.
 * @param inputs An array of indexes identifying the input operands.
 * @param outputCount The number of entries in the outputs array.
 * @param outputs An array of indexes identifying the output operands.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
1306  * @deprecated NNAPI is deprecated. See
1307  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1308  *   for more details.
1309  */
1310 int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
1311                                                   const uint32_t* inputs, uint32_t outputCount,
1312                                                   const uint32_t* outputs) __NNAPI_INTRODUCED_IN(27)
1313         __NNAPI_DEPRECATED_IN(35);
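
/*
 * Usage sketch (continues the illustrative ADD model above; not normative):
 * operand 0 is the only model input and operand 3 the only model output, after
 * which the model can be finished.
 *
 *   const uint32_t modelInputs[1] = {0};
 *   const uint32_t modelOutputs[1] = {3};
 *   ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, modelInputs, 1, modelOutputs);
 *   ANeuralNetworksModel_finish(model);
 */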
1314 
1315 /**
1316  * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
1317  * calculated with range and/or precision as low as that of the IEEE 754 16-bit
1318  * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1319  * must be calculated using at least the range and precision of the IEEE 754
1320  * 32-bit floating-point format.
1321  *
1322  * The relaxComputationFloat32toFloat16 setting of the main model of
1323  * a compilation overrides the values of the referenced models.
1324  *
 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
 * called will return an error.
 *
 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
 *
 * Available since NNAPI feature level 2.
 *
 * @param model The model to be modified.
 * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
 *              calculated with range and/or precision as low as that of the
 *              IEEE 754 16-bit floating point format. 'false' indicates
 *              {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
 *              at least the range and precision of the IEEE 754 32-bit floating
 *              point format.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
1339  * @deprecated NNAPI is deprecated. See
1340  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1341  *   for more details.
1342  */
1343 int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow)
1344         __NNAPI_INTRODUCED_IN(28) __NNAPI_DEPRECATED_IN(35);
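
/*
 * Usage sketch (illustrative): allowing FP16-precision arithmetic for
 * ANEURALNETWORKS_TENSOR_FLOAT32 operands. This must be called before
 * ANeuralNetworksModel_finish.
 *
 *   ANeuralNetworksModel_relaxComputationFloat32toFloat16(model, true);
 */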
1345 
1346 /**
1347  * Create a {@link ANeuralNetworksCompilation} to compile the given model.
1348  *
1349  * The model passed to this function is termed the "main model" of the
1350  * compilation, to distinguish it from other models referred to by an Operand
1351  * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
1352  *
1353  * <p>This function only creates the object. Compilation is only performed once
1354  * {@link ANeuralNetworksCompilation_finish} is invoked.</p>
1355  *
1356  * <p>{@link ANeuralNetworksCompilation_finish} should be called once
1357  * all desired properties have been set on the compilation.</p>
1358  *
 * <p>{@link ANeuralNetworksCompilation_free} should be called once the compilation
 * is no longer needed.</p>
1361  *
1362  * <p>The provided model must outlive the compilation.</p>
1363  *
1364  * The model must already have been finished by a call to
1365  * {@link ANeuralNetworksModel_finish}.
1366  *
1367  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1368  *
1369  * Available since NNAPI feature level 1.
1370  *
1371  * @param model The {@link ANeuralNetworksModel} to be compiled.
1372  * @param compilation The newly created object or NULL if unsuccessful.
1373  *
1374  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
1375  *         if the model is invalid.
1376  * @deprecated NNAPI is deprecated. See
1377  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1378  *   for more details.
1379  */
1380 int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model,
1381                                       ANeuralNetworksCompilation** compilation)
1382         __NNAPI_INTRODUCED_IN(27) __NNAPI_DEPRECATED_IN(35);
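
/*
 * Usage sketch (continues the illustrative flow above; not normative): creating
 * a compilation for a finished model.
 *
 *   ANeuralNetworksCompilation* compilation = NULL;
 *   if (ANeuralNetworksCompilation_create(model, &compilation) != ANEURALNETWORKS_NO_ERROR) {
 *       // Handle the error; compilation is NULL here.
 *   }
 */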
1383 
1384 /**
1385  * Destroy a compilation.
1386  *
1387  * The compilation need not have been finished by a call to
1388  * {@link ANeuralNetworksCompilation_finish}.
1389  *
1390  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1391  *
1392  * Available since NNAPI feature level 1.
1393  *
1394  * @param compilation The compilation to be destroyed. Passing NULL is acceptable and
1395  *                    results in no operation.
1396  * @deprecated NNAPI is deprecated. See
1397  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1398  *   for more details.
1399  */
1400 void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation)
1401         __NNAPI_INTRODUCED_IN(27) __NNAPI_DEPRECATED_IN(35);
1402 
1403 /**
1404  * Sets the execution preference.
1405  *
 * <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime
 * uses PREFER_FAST_SINGLE_ANSWER.</p>
1408  *
1409  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1410  *
1411  * Available since NNAPI feature level 1.
1412  *
1413  * @param compilation The compilation to be modified.
1414  * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER},
1415  *                  {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or
1416  *                  {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}.
1417  *
1418  * @return ANEURALNETWORKS_NO_ERROR if successful.
1419  * @deprecated NNAPI is deprecated. See
1420  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1421  *   for more details.
1422  */
1423 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation,
1424                                              int32_t preference) __NNAPI_INTRODUCED_IN(27)
1425         __NNAPI_DEPRECATED_IN(35);
1426 
1427 /**
1428  * Indicate that we have finished modifying a compilation. Required before
1429  * calling {@link ANeuralNetworksBurst_create} or
1430  * {@link ANeuralNetworksExecution_create}.
1431  *
1432  * An application must ensure that no other thread uses the compilation at the
1433  * same time.
1434  *
1435  * This function must only be called once for a given compilation.
1436  *
1437  * If {@link ANeuralNetworksCompilation_setTimeout} was called on this
1438  * compilation, and the compilation is not able to be finished before the
1439  * timeout duration is exceeded, then compilation may be aborted, in which case
1440  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned.
1441  *
1442  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1443  *
1444  * Available since NNAPI feature level 1.
1445  *
1446  * @param compilation The compilation to be finished.
1447  *
1448  * @return ANEURALNETWORKS_NO_ERROR if successful.
1449  * @deprecated NNAPI is deprecated. See
1450  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1451  *   for more details.
1452  */
1453 int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation)
1454         __NNAPI_INTRODUCED_IN(27) __NNAPI_DEPRECATED_IN(35);
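
/*
 * Usage sketch (continues the illustrative flow above; not normative): choosing
 * a power/performance trade-off and finishing the compilation.
 *
 *   ANeuralNetworksCompilation_setPreference(compilation, ANEURALNETWORKS_PREFER_LOW_POWER);
 *   if (ANeuralNetworksCompilation_finish(compilation) != ANEURALNETWORKS_NO_ERROR) {
 *       // Compilation failed; release resources and fall back to another path.
 *   }
 */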
1455 
1456 /**
1457  * Set the execution priority.
1458  *
 * Execution priorities are relative to other executions created by the same
 * application (specifically the same uid) for the same device. Priorities of
 * executions from one application will not affect executions from another
 * application. Similarly, priorities of executions on one device will not
 * affect executions on another device.
1464  *
1465  * Higher priority executions may use more compute resources than lower priority
1466  * executions, and may preempt or starve lower priority executions.
1467  *
1468  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1469  *
1470  * Available since NNAPI feature level 4.
1471  *
1472  * @param compilation The compilation to be modified.
1473  * @param priority The relative priority of the execution compared to other
1474  *     executions created by the application. Must be one of
1475  *     ANEURALNETWORKS_PRIORITY_*.
1476  *
1477  * @return ANEURALNETWORKS_NO_ERROR if successful.
1478  * @deprecated NNAPI is deprecated. See
1479  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1480  *   for more details.
1481  */
1482 int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* compilation, int priority)
1483         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
1484 
1485 /**
1486  * Set the maximum expected duration for compiling the model.
1487  *
1488  * If the device is not able to complete the compilation within the specified
1489  * duration, the compilation may be aborted. The timeout duration begins at the
1490  * call to {@link ANeuralNetworksCompilation_finish}.
1491  *
1492  * This timeout duration acts as a hint to drivers, and can be used to both free
1493  * up compute resources within the driver and return control back to the
1494  * application quicker than is possible without the hint. It enables drivers
1495  * that are able to estimate how long a compilation will take to abort the
1496  * compilation before it has even started if the driver believes the compilation
1497  * cannot be completed within the timeout duration. Similarly, it enables
1498  * drivers to abort an ongoing compilation if it is taking too long. However,
1499  * this call does not guarantee that the compilation will complete or abort
1500  * within the timeout duration.
1501  *
1502  * By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called),
1503  * the timeout duration for compiling the model is considered infinite.
1504  *
1505  * The {@link ANeuralNetworksCompilation} must have been created with
1506  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
1507  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
1508  * device has a feature level reported by
1509  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
1510  * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will
1511  * be ignored.
1512  *
1513  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1514  *
1515  * @param compilation The compilation to be modified.
1516  * @param duration The maximum amount of time in nanoseconds that is expected to
1517  *     be spent finishing a compilation. If this duration is exceeded, the
1518  *     compilation may be aborted. If set to 0, the timeout duration is
1519  *     considered infinite.
1520  *
1521  * @return ANEURALNETWORKS_NO_ERROR if successful.
1522  *
1523  * Available since NNAPI feature level 4.
1524  * @deprecated NNAPI is deprecated. See
1525  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1526  *   for more details.
1527  */
1528 int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* compilation,
1529                                           uint64_t duration) __NNAPI_INTRODUCED_IN(30)
1530         __NNAPI_DEPRECATED_IN(35);
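
/*
 * Usage sketch (standalone, illustrative): raising the priority and bounding the
 * compilation time. The timeout only applies to compilations created with
 * ANeuralNetworksCompilation_createForDevices for a single device; "compilation"
 * here is assumed to have been created that way.
 *
 *   ANeuralNetworksCompilation_setPriority(compilation, ANEURALNETWORKS_PRIORITY_HIGH);
 *   ANeuralNetworksCompilation_setTimeout(compilation, 1000000000ull);  // one second
 *   ANeuralNetworksCompilation_finish(compilation);
 */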
1531 
1532 /**
1533  * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
1534  * This only creates the object. Computation is only performed once
1535  * {@link ANeuralNetworksExecution_burstCompute},
1536  * {@link ANeuralNetworksExecution_compute},
1537  * {@link ANeuralNetworksExecution_startCompute} or
1538  * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
1539  *
1540  * <p>The provided compilation must outlive the execution.</p>
1541  *
1542  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
1543  *
1544  * Available since NNAPI feature level 1.
1545  *
1546  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
1547  * @param execution The newly created object or NULL if unsuccessful.
1548  *
1549  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
1550  *         if the compilation is invalid.
1551  * @deprecated NNAPI is deprecated. See
1552  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1553  *   for more details.
1554  */
1555 int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation,
1556                                     ANeuralNetworksExecution** execution) __NNAPI_INTRODUCED_IN(27)
1557         __NNAPI_DEPRECATED_IN(35);
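
/*
 * Usage sketch (continues the illustrative flow above; not normative): creating
 * an execution from the finished compilation.
 *
 *   ANeuralNetworksExecution* execution = NULL;
 *   if (ANeuralNetworksExecution_create(compilation, &execution) != ANEURALNETWORKS_NO_ERROR) {
 *       // Handle the error; execution is NULL here.
 *   }
 */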
1558 
1559 /**
1560  * Destroy an execution.
1561  *
1562  * <p>The execution need not have been scheduled by a call to
1563  * {@link ANeuralNetworksExecution_burstCompute},
1564  * {@link ANeuralNetworksExecution_compute},
1565  * {@link ANeuralNetworksExecution_startCompute} or
1566  * {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled,
1567  * then the application must not call {@link ANeuralNetworksExecution_free}
1568  * until the execution has completed (i.e.,
1569  * {@link ANeuralNetworksExecution_burstCompute},
1570  * {@link ANeuralNetworksExecution_compute}, or
1571  * {@link ANeuralNetworksEvent_wait} has returned).
1572  *
1573  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
1574  *
1575  * Available since NNAPI feature level 1.
1576  *
1577  * @param execution The execution to be destroyed. Passing NULL is acceptable and
1578  *                  results in no operation.
1579  * @deprecated NNAPI is deprecated. See
1580  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1581  *   for more details.
1582  */
1583 void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __NNAPI_INTRODUCED_IN(27)
1584         __NNAPI_DEPRECATED_IN(35);
1585 
1586 /**
1587  * Associate a user buffer with an input of the model of the
1588  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1589  * been scheduled. Once evaluation of the execution has been scheduled, the
1590  * application must not change the content of the buffer until the execution has
1591  * completed. Evaluation of the execution will not change the content of the
1592  * buffer.
1593  *
1594  * <p>The provided buffer must outlive the execution.</p>
1595  *
1596  * If the input is optional, you can indicate that it is omitted by
1597  * passing nullptr for buffer and 0 for length.
1598  *
1599  * Otherwise, if the user has not set the execution to accept padded input buffers by
1600  * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1601  * must be equal to the raw size of the input (i.e. the size of an element multiplied by the
1602  * number of elements). Passing a length argument with value not equal to the raw size of the input
1603  * will result in ANEURALNETWORKS_BAD_DATA.
1604  *
1605  * Otherwise, if the user has set the execution to accept padded input buffers by calling
1606  * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1607  * than the raw size of the input, and the extra bytes at the end of the buffer may be used
1608  * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1609  * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
1610  *
1611  * This function may only be invoked when the execution is in the preparation state.
1612  *
1613  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1614  * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
1615  * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
1616  * preferred buffer alignment and padding, to improve performance.
1617  *
1618  * Available since NNAPI feature level 1.
1619  *
1620  * @param execution The execution to be modified.
1621  * @param index The index of the input argument we are setting. It is
1622  *              an index into the lists passed to
1623  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1624  *              the index associated with
1625  *              {@link ANeuralNetworksModel_addOperand}.
1626  * @param type The {@link ANeuralNetworksOperandType} of the
1627  *             operand. Unless the input is omitted, this should be
1628  *             used to specify the dimensions that were left
1629  *             unspecified when the operand was added to the
1630  *             model. All other properties of the type must be the
1631  *             same as specified in the model. If the type is the same
1632  *             as specified when the model was built, NULL can be
1633  *             passed. Neither the {@link ANeuralNetworksOperandType}
1634  *             nor the dimensions it points to need to outlive the call
1635  *             to {@link ANeuralNetworksExecution_setInput}.
1636  * @param buffer The buffer containing the data.
1637  * @param length The size of the data value in bytes plus any end padding.
1638  *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
1641  * @deprecated NNAPI is deprecated. See
1642  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1643  *   for more details.
1644  */
1645 int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index,
1646                                       const ANeuralNetworksOperandType* type, const void* buffer,
1647                                       size_t length) __NNAPI_INTRODUCED_IN(27)
1648         __NNAPI_DEPRECATED_IN(35);
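
/*
 * Usage sketch (continues the illustrative ADD model above; not normative):
 * binding a user buffer to model input 0. The type argument is NULL because the
 * operand was fully specified when the model was built.
 *
 *   float inputData[2 * 2] = {10.0f, 20.0f, 30.0f, 40.0f};
 *   ANeuralNetworksExecution_setInput(execution, 0, NULL, inputData, sizeof(inputData));
 */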
1649 
1650 /**
1651  * Associate a region of a memory object with an input of the model of the
1652  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1653  * been scheduled. Once evaluation of the execution has been scheduled, the
1654  * application must not change the content of the region until the execution has
1655  * completed. Evaluation of the execution will not change the content of the
1656  * region.
1657  *
1658  * <p>The provided memory must outlive the execution.</p>
1659  *
1660  * If the input is optional, you can indicate that it is omitted by
1661  * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for
1662  * buffer and 0 for length.
1663  *
1664  * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created
1665  * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created
1666  * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating
1667  * the whole memory is used.
1668  *
1669  * Otherwise, if the user has not set the execution to accept padded input memory objects by
1670  * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1671  * must be equal to the raw size of the input (i.e. the size of an element multiplied by the
1672  * number of elements). Passing a length argument with value not equal to the raw size of the input
1673  * will result in ANEURALNETWORKS_BAD_DATA.
1674  *
1675  * Otherwise, if the user has set the execution to accept padded input memory objects by calling
1676  * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1677  * than the raw size of the input, and the extra bytes at the end of the memory region may be used
1678  * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1679  * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
1680  *
1681  * This function may only be invoked when the execution is in the preparation state.
1682  *
1683  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1684  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1685  * AHardwareBuffer usage.
1686  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
1687  * created from memory descriptors.
1688  * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
1689  * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
1690  * preferred memory alignment and padding, to improve performance.
1691  *
1692  * Available since NNAPI feature level 1.
1693  *
1694  * @param execution The execution to be modified.
1695  * @param index The index of the input argument we are setting. It is
1696  *              an index into the lists passed to
1697  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1698  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
1699  * @param type The {@link ANeuralNetworksOperandType} of the
1700  *             operand. This should be used to specify the dimensions
1701  *             that were left unspecified when the operand was added
1702  *             to the model. All other properties of the type must be
1703  *             the same as specified in the model. If the type is the
1704  *             same as specified when the model was built, NULL can be
1705  *             passed. Neither the {@link ANeuralNetworksOperandType}
1706  *             nor the dimensions it points to need to outlive the call
1707  *             to {@link ANeuralNetworksExecution_setInputFromMemory}.
1708  * @param memory The memory containing the data.
1709  * @param offset This specifies the location of the data within the memory.
1710  *               The offset is in bytes from the start of memory.
1711  * @param length The size of the data value in bytes plus any end padding.
1712  *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
1715  * @deprecated NNAPI is deprecated. See
1716  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1717  *   for more details.
1718  */
1719 int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
1720                                                 const ANeuralNetworksOperandType* type,
1721                                                 const ANeuralNetworksMemory* memory, size_t offset,
1722                                                 size_t length) __NNAPI_INTRODUCED_IN(27)
1723         __NNAPI_DEPRECATED_IN(35);
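
/*
 * Usage sketch (standalone, illustrative): binding a region of a memory object
 * to input 0. "memory" is assumed to have been created earlier, for example with
 * ANeuralNetworksMemory_createFromFd, and to contain the 2x2 float input at
 * offset 0.
 *
 *   ANeuralNetworksExecution_setInputFromMemory(execution, 0, NULL, memory,
 *                                               0, 2 * 2 * sizeof(float));
 */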
1724 
1725 /**
1726  * Associate a user buffer with an output of the model of the
1727  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1728  * been scheduled. Once evaluation of the execution has been scheduled, the
1729  * application must not change the content of the buffer until the execution has
1730  * completed.
1731  *
1732  * <p>The provided buffer must outlive the execution.</p>
1733  *
1734  * If the output is optional, you can indicate that it is omitted by
1735  * passing nullptr for buffer and 0 for length.
1736  *
1737  * Otherwise, if the user has not set the execution to accept padded output buffers by
1738  * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1739  * must be equal to the raw size of the output (i.e. the size of an element multiplied by the
1740  * number of elements). Passing a length argument with value not equal to the raw size of the output
1741  * will result in ANEURALNETWORKS_BAD_DATA.
1742  *
1743  * Otherwise, if the user has set the execution to accept padded output buffers by calling
1744  * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1745  * than the raw size of the output, and the extra bytes at the end of the buffer may be used
1746  * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1747  * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
1748  *
1749  * This function may only be invoked when the execution is in the preparation state.
1750  *
1751  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1752  * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
1753  * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
1754  * preferred buffer alignment and padding, to improve performance.
1755  *
1756  * Available since NNAPI feature level 1.
1757  *
1758  * @param execution The execution to be modified.
1759  * @param index The index of the output argument we are setting. It is
1760  *              an index into the lists passed to
1761  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1762  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
1763  * @param type The {@link ANeuralNetworksOperandType} of the
1764  *             operand. Unless the output is omitted, this should be
1765  *             used to specify the dimensions that were left
1766  *             unspecified when the operand was added to the
1767  *             model. All other properties of the type must be the
1768  *             same as specified in the model. If the type is the same
1769  *             as specified when the model was built, NULL can be
1770  *             passed. Neither the {@link ANeuralNetworksOperandType}
1771  *             nor the dimensions it points to need to outlive the call
1772  *             to {@link ANeuralNetworksExecution_setOutput}.
1773  *             Since NNAPI feature level 3, the output operand can have unspecified
1774  *             dimensions or rank to be deduced dynamically during the execution.
1775  *             However, the user must provide a large enough buffer. The user
1776  *             can retrieve the output dimensional information after the execution
1777  *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
1778  *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
1779  * @param buffer The buffer where the data is to be written.
1780  * @param length The size of the data value in bytes plus any end padding.
1781  *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
1784  * @deprecated NNAPI is deprecated. See
1785  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1786  *   for more details.
1787  */
1788 int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
1789                                        const ANeuralNetworksOperandType* type, void* buffer,
1790                                        size_t length) __NNAPI_INTRODUCED_IN(27)
1791         __NNAPI_DEPRECATED_IN(35);
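
/*
 * Usage sketch (continues the illustrative ADD model above; not normative):
 * binding a user buffer that will receive model output 0.
 *
 *   float outputData[2 * 2];
 *   ANeuralNetworksExecution_setOutput(execution, 0, NULL, outputData, sizeof(outputData));
 */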
1792 
1793 /**
1794  * Associate a region of a memory object with an output of the model of the
1795  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1796  * been scheduled. Once evaluation of the execution has been scheduled, the
1797  * application must not change the content of the region until the execution has
1798  * completed.
1799  *
1800  * <p>The provided memory must outlive the execution.</p>
1801  *
1802  * If the output is optional, you can indicate that it is omitted by
1803  * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for
1804  * buffer and 0 for length.
1805  *
1806  * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created
1807  * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created
1808  * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating
1809  * the whole memory is used.
1810  *
1811  * Otherwise, if the user has not set the execution to accept padded output memory objects by
1812  * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1813  * must be equal to the raw size of the output (i.e. the size of an element multiplied by the
1814  * number of elements). Passing a length argument with value not equal to the raw size of the output
1815  * will result in ANEURALNETWORKS_BAD_DATA.
1816  *
1817  * Otherwise, if the user has set the execution to accept padded output memory objects by calling
1818  * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1819  * than the raw size of the output, and the extra bytes at the end of the memory region may be used
1820  * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1821  * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
1822  *
1823  * This function may only be invoked when the execution is in the preparation state.
1824  *
1825  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1826  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1827  * AHardwareBuffer usage.
1828  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
1829  * created from memory descriptors.
1830  * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
1831  * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
1832  * preferred memory alignment and padding, to improve performance.
1833  *
1834  * Available since NNAPI feature level 1.
1835  *
1836  * @param execution The execution to be modified.
1837  * @param index The index of the output argument we are setting. It is
1838  *              an index into the lists passed to
1839  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1840  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
1841  * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be
1842  *             used to specify the dimensions that were left
1843  *             unspecified when the operand was added to the
1844  *             model. All other properties of the type must be the
1845  *             same as specified in the model. If the type is the same
1846  *             as specified when the model was built, NULL can be
1847  *             passed. Neither the {@link ANeuralNetworksOperandType}
1848  *             nor the dimensions it points to need to outlive the call
1849  *             to {@link ANeuralNetworksExecution_setOutputFromMemory}.
1850  *             Since NNAPI feature level 3, the output operand can have unspecified
1851  *             dimensions or rank to be deduced dynamically during the execution.
1852  *             However, the user must provide a large enough memory. The user
1853  *             can retrieve the output dimensional information after the execution
1854  *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
1855  *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
1856  * @param memory The memory where the data is to be stored.
1857  * @param offset This specifies the location of the data within the memory.
1858  *               The offset is in bytes from the start of memory.
1859  * @param length The size of the data value in bytes plus any end padding.
1860  *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
1863  * @deprecated NNAPI is deprecated. See
1864  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1865  *   for more details.
1866  */
1867 int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
1868                                                  const ANeuralNetworksOperandType* type,
1869                                                  const ANeuralNetworksMemory* memory, size_t offset,
1870                                                  size_t length) __NNAPI_INTRODUCED_IN(27)
1871         __NNAPI_DEPRECATED_IN(35);
1872 
1873 /**
1874  * Schedule asynchronous evaluation of the execution.
1875  *
1876  * <p>Schedules asynchronous evaluation of the execution. Once the execution
1877  * has completed and the outputs are ready to be consumed, the returned event
1878  * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that
1879  * event.
1880  * </p>
1881  *
1882  * ANeuralNetworksEvent_wait must be called to recuperate the resources used
1883  * by the execution.
1884  *
1885  * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
1886  * and the execution is not able to complete before the timeout duration is
1887  * exceeded, then execution may be aborted, in which case
1888  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned through
1889  * {@link ANeuralNetworksExecution_startCompute} or
1890  * {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
1891  * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
1892  * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout
1893  * duration hint will be ignored.
1894  *
1895  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
1896  * the condition model does not output false within the loop timeout duration,
1897  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
1898  * will be returned through {@link ANeuralNetworksEvent_wait} on the event
1899  * object.
1900  *
1901  * If the device can detect before the execution has started that the execution
1902  * will not complete within the timeout duration, the device may choose to skip
1903  * the execution and instead return ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}.
1904  *
1905  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
1906  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
1907  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
1908  * the execution is in the completed state.
1909  *
1910  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1911  *
1912  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
1913  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
1914  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
1915  * asynchronous execution with dependencies.
1916  *
1917  * Available since NNAPI feature level 1.
1918  *
1919  * @param execution The execution to be scheduled and executed.
1920  * @param event The event that will be signaled on completion. event is set to
1921  *              NULL if there's an error.
1922  *
1923  * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
1924  * @deprecated NNAPI is deprecated. See
1925  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1926  *   for more details.
1927  */
1928 int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution,
1929                                           ANeuralNetworksEvent** event) __NNAPI_INTRODUCED_IN(27)
1930         __NNAPI_DEPRECATED_IN(35);
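
/*
 * Usage sketch (continues the illustrative flow above; not normative): scheduling
 * the execution asynchronously, waiting for it, and releasing the objects.
 *
 *   ANeuralNetworksEvent* event = NULL;
 *   if (ANeuralNetworksExecution_startCompute(execution, &event) == ANEURALNETWORKS_NO_ERROR) {
 *       if (ANeuralNetworksEvent_wait(event) == ANEURALNETWORKS_NO_ERROR) {
 *           // outputData now holds the result.
 *       }
 *       ANeuralNetworksEvent_free(event);
 *   }
 *   ANeuralNetworksExecution_free(execution);
 */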
1931 
1932 /**
1933  * Set the maximum expected duration of the specified execution.
1934  *
1935  * If the device is not able to complete the execution within the specified
1936  * duration, the execution may be aborted. The timeout duration begins at a
1937  * call to one of:
1938  * - {@link ANeuralNetworksExecution_burstCompute}
1939  * - {@link ANeuralNetworksExecution_compute}
1940  * - {@link ANeuralNetworksExecution_startCompute}
1941  * - {@link ANeuralNetworksExecution_startComputeWithDependencies}
1942  *
1943  * This timeout duration acts as a hint to drivers, and can be used to both free
1944  * up compute resources within the driver and return control back to the
1945  * application quicker than is possible without the hint. It enables drivers
1946  * that are able to estimate how long an execution will take to abort the
1947  * execution before it has even started if the driver believes the execution
1948  * cannot be completed within the timeout duration. Similarly, it enables
1949  * drivers to abort an ongoing execution if it is taking too long. However, this
1950  * call does not guarantee that the execution will complete or abort within the
1951  * timeout duration.
1952  *
1953  * By default (i.e., unless ANeuralNetworksExecution_setTimeout is called),
1954  * the timeout duration for execution is considered infinite.
1955  *
1956  * The {@link ANeuralNetworksExecution} must have been created from an
1957  * {@link ANeuralNetworksCompilation} which in turn was created from
1958  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
1959  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
1960  * device has a feature level reported by
1961  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
1962  * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will
1963  * be ignored.
1964  *
1965  * This function may only be invoked when the execution is in the preparation state.
1966  *
1967  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1968  *
1969  * @param execution The execution to be modified.
1970  * @param duration The maximum amount of time in nanoseconds that is expected to
1971  *     be spent executing a model. If this duration is exceeded, the execution
1972  *     may be aborted. If set to 0, the timeout duration is considered infinite.
1973  *
1974  * @return ANEURALNETWORKS_NO_ERROR if successful.
1975  *
1976  * Available since NNAPI feature level 4.
1977  * @deprecated NNAPI is deprecated. See
1978  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
1979  *   for more details.
1980  */
1981 int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
1982         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
1983 
1984 /**
1985  * Set the maximum duration of WHILE loops in the specified execution.
1986  *
1987  * This is a fuzzy per-loop timeout intended to prevent infinite loops.
1988  *
1989  * If a WHILE loop condition model does not output false within the specified
1990  * duration, the execution will be aborted.
1991  *
1992  * See {@link ANeuralNetworks_getDefaultLoopTimeout} and
1993  * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default
1994  * and maximum timeout values.
1995  *
1996  * This function may only be invoked when the execution is in the preparation state.
1997  *
1998  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1999  *
2000  * @param execution The execution to be modified.
2001  * @param duration The maximum amount of time in nanoseconds that can be spent
2002  *     executing a WHILE loop. If the specified duration value exceeds the value
2003  *     produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be
2004  *     overridden by that value.
2005  *
2006  * @return ANEURALNETWORKS_NO_ERROR if successful.
2007  *         ANEURALNETWORKS_BAD_STATE if execution has started.
2008  *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2009  *
2010  * Available since NNAPI feature level 4.
2011  * @deprecated NNAPI is deprecated. See
2012  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2013  *   for more details.
2014  */
2015 int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
2016         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
2017 
2018 /**
2019  * Get the default timeout value for WHILE loops.
2020  *
2021  * @return The default timeout value in nanoseconds.
2022  *
2023  * Available since NNAPI feature level 4.
2024  * @deprecated NNAPI is deprecated. See
2025  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2026  *   for more details.
2027  */
2028 uint64_t ANeuralNetworks_getDefaultLoopTimeout() __NNAPI_INTRODUCED_IN(30)
2029         __NNAPI_DEPRECATED_IN(35);
2030 
2031 /**
2032  * Get the maximum timeout value for WHILE loops.
2033  *
2034  * @return The maximum timeout value in nanoseconds.
2035  *
2036  * Available since NNAPI feature level 4.
2037  * @deprecated NNAPI is deprecated. See
2038  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2039  *   for more details.
2040  */
2041 uint64_t ANeuralNetworks_getMaximumLoopTimeout() __NNAPI_INTRODUCED_IN(30)
2042         __NNAPI_DEPRECATED_IN(35);
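
/*
 * Usage sketch (standalone, illustrative): clamping a requested WHILE-loop
 * timeout to the supported maximum before applying it to an execution.
 *
 *   uint64_t requested = 4 * ANeuralNetworks_getDefaultLoopTimeout();
 *   uint64_t maximum = ANeuralNetworks_getMaximumLoopTimeout();
 *   ANeuralNetworksExecution_setLoopTimeout(execution,
 *                                           requested < maximum ? requested : maximum);
 */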
2043 
2044 /**
2045  * Waits until the execution completes.
2046  *
2047  * More than one thread can wait on an event. When the execution completes,
2048  * all threads will be released.
2049  *
2050  * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution
2051  * corresponding to this event, and the execution is not able to complete
2052  * before the duration is exceeded, the execution may be aborted, in which case
2053  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned here.
2054  *
2055  * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
2056  * the condition model does not output false within the loop timeout duration,
2057  * the execution will be aborted, and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
2058  * will be returned here.
2059  *
2060  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
2061  *
2062  * Available since NNAPI feature level 1.
2063  *
2064  * @param event The event that will be signaled on completion.
2065  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
2066  *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
2067  *         be properly mapped.
2068  * @deprecated NNAPI is deprecated. See
2069  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2070  *   for more details.
2071  */
2072 int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __NNAPI_INTRODUCED_IN(27)
2073         __NNAPI_DEPRECATED_IN(35);
2074 
2075 /**
2076  * Destroys the event.
2077  *
2078  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2079  *
2080  * Available since NNAPI feature level 1.
2081  *
2082  * @param event The event object to be destroyed. Passing NULL is acceptable and
2083  *              results in no operation.
2084  * @deprecated NNAPI is deprecated. See
2085  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2086  *   for more details.
2087  */
2088 void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __NNAPI_INTRODUCED_IN(27)
2089         __NNAPI_DEPRECATED_IN(35);
2090 
2091 /**
2092  * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor.
2093  *
 * The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd.
 * It will instead dup the provided sync_fence_fd and own the duplicate.
2096  *
2097  * @param sync_fence_fd The sync_fence file descriptor.
2098  * @param event The newly created object or NULL if unsuccessful.
2099  *
2100  * @return ANEURALNETWORKS_NO_ERROR if successful.
2101  *
2102  * Available since NNAPI feature level 4.
2103  * @deprecated NNAPI is deprecated. See
2104  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2105  *   for more details.
2106  */
2107 int ANeuralNetworksEvent_createFromSyncFenceFd(int sync_fence_fd, ANeuralNetworksEvent** event)
2108         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
2109 
2110 /**
2111  * Get sync_fence file descriptor from the event.
2112  *
2113  * If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd
2114  * will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned.
2115  *
2116  * See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and
2117  * {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create
2118  * an event backed by a sync fence.
2119  *
2120  * The user takes ownership of the returned fd, and must close the returned file descriptor when
2121  * it is no longer needed.
2122  *
2123  * @param event An event that is backed by a sync fence.
2124  * @param sync_fence_fd The sync_fence file descriptor. The file descriptor will
2125  *                      be set to -1 if there is an error.
2126  *
2127  * @return ANEURALNETWORKS_NO_ERROR if successful.
2128  *
2129  * Available since NNAPI feature level 4.
2130  * @deprecated NNAPI is deprecated. See
2131  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2132  *   for more details.
2133  */
2134 int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* sync_fence_fd)
2135         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
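
/*
 * Usage sketch (standalone, illustrative): exporting the sync fence behind an
 * event so other components can wait on it. "event" is assumed to be backed by
 * a sync fence, for example one returned by
 * ANeuralNetworksExecution_startComputeWithDependencies.
 *
 *   int fenceFd = -1;
 *   if (ANeuralNetworksEvent_getSyncFenceFd(event, &fenceFd) == ANEURALNETWORKS_NO_ERROR) {
 *       // The caller owns fenceFd and must close(fenceFd) when done.
 *   }
 */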
2136 
2137 /**
2138  * Schedule asynchronous evaluation of the execution with dependencies.
2139  *
2140  * The execution will wait for all the depending events to be signaled before
2141  * starting the evaluation. Once the execution has completed and the outputs
2142  * are ready to be consumed, the returned event will be signaled. Depending on which
2143  * devices are handling the execution, the event could be backed by a sync fence.
2144  * Use {@link ANeuralNetworksEvent_wait} to wait for that event.
2145  *
 * ANeuralNetworksEvent_wait must be called to recuperate the resources used
 * by the execution.
2148  *
2149  * If parts of the execution are scheduled on devices that do not support fenced execution,
2150  * the function call may wait for such parts to finish before returning.
2151  *
2152  * The function will return an error if any of the events in dependencies is already in a bad
2153  * state. After the execution is scheduled, if any of the events in dependencies does not complete
2154  * normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned
2155  * event will return an error.
2156  *
2157  * The function will return an error if any of the execution outputs has a tensor operand type
2158  * that is not fully specified.
2159  *
2160  * The function can be passed a timeout duration in nanoseconds. This timeout
2161  * duration acts as a hint to drivers in the same way that the timeout durations
2162  * in {@link ANeuralNetworksCompilation_setTimeout} and {@link
2163  * ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration
2164  * begins when all waitFor sync fences have been signaled, and can be used
2165  * together with {@link ANeuralNetworksExecution_setTimeout} which specifies the
2166  * maximum timeout duration beginning at the call to
2167  * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
2168  * If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created
2169  * from an {@link ANeuralNetworksCompilation} which in turn was created from
2170  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
2171  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either
2172  * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the
2173  * timeout duration passed to this call is exceeded, the execution may be
2174  * aborted, in which case ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be
2175  * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies}
2176  * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
2177  * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
2178  * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration
2179  * hints will be ignored.
2180  *
2181  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
2182  * the condition model does not output false within the loop timeout duration,
2183  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
2184  * will be returned through {@link ANeuralNetworksEvent_wait} on the event
2185  * object.
2186  *
2187  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
2188  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
2189  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
2190  * the execution is in the completed state.
2191  *
2192  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
2193  *
2194  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
2195  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
2196  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
2197  *
2198  * @param execution The execution to be scheduled and executed.
2199  * @param dependencies A set of depending events. The actual evaluation will not start
2200  *                     until all the events are signaled.
2201  * @param num_dependencies The number of events in the dependencies set.
2202  * @param duration The maximum amount of time in nanoseconds that is expected to
2203  *                 be spent executing the model after all dependencies are
2204  *                 signaled. If set to 0, the timeout duration is considered
2205  *                 infinite.
2206  * @param event The event that will be signaled on completion. event is set to
2207  *              NULL if there's an error.
2208  *
2209  * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
2210  *
2211  * Available since NNAPI feature level 4.
2212  * @deprecated NNAPI is deprecated. See
2213  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2214  *   for more details.
2215  */
2216 int ANeuralNetworksExecution_startComputeWithDependencies(
2217         ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies,
2218         uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event)
2219         __NNAPI_INTRODUCED_IN(30) __NNAPI_DEPRECATED_IN(35);
2220 
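/*
 * Example usage (an illustrative sketch; "execution" and "producer_fence_fd" are placeholder
 * names, and error handling is abbreviated): waiting on an existing sync fence, scheduling the
 * fenced execution with no timeout hint, then waiting for completion and releasing resources.
 *
 *     ANeuralNetworksEvent* dependency = NULL;
 *     ANeuralNetworksEvent_createFromSyncFenceFd(producer_fence_fd, &dependency);
 *
 *     const ANeuralNetworksEvent* dependencies[] = {dependency};
 *     ANeuralNetworksEvent* done = NULL;
 *     int status = ANeuralNetworksExecution_startComputeWithDependencies(
 *             execution, dependencies, 1, 0, &done);  // duration 0 means no timeout hint
 *     if (status == ANEURALNETWORKS_NO_ERROR) {
 *         ANeuralNetworksEvent_wait(done);  // must be called to reclaim execution resources
 *         ANeuralNetworksEvent_free(done);
 *     }
 *     ANeuralNetworksEvent_free(dependency);
 */
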
2221 /**
2222  * Get the NNAPI runtime feature level.
2223  *
2224  * Since API level 31 (NNAPI feature level 5), the NNAPI runtime (libneuralnetworks.so) and its
2225  * API specification can be updated between Android API releases.
2226  *
2227  * On Android devices with API level 31 and newer, for NNAPI runtime feature discovery,
2228  * the NNAPI runtime feature level must be used instead of the Android device API level.
2229  *
2230  * On Android devices with API level 30 and older, the Android API level of the Android
2231  * device must be used for NNAPI runtime feature discovery. Enum values in
2232  * {@link FeatureLevelCode} from feature level 1 to 5 have their corresponding Android
2233  * API levels listed in their documentation, and each such enum value equals the corresponding
2234  * API level. This allows using the Android API level as the feature level.
2235  * This mapping between enum value and Android API level does not exist for feature levels
2236  * after NNAPI feature level 5 and API levels after S (31).
2237  *
2238  * Example usage:
2239  * int device_api_level = android_get_device_api_level();
2240  * int64_t runtime_feature_level = (device_api_level < __ANDROID_API_S__) ?
2241  *                                  device_api_level : ANeuralNetworks_getRuntimeFeatureLevel();
2242  *
2243  * Runtime feature level is closely related to NNAPI device feature level
2244  * ({@link ANeuralNetworksDevice_getFeatureLevel}), which indicates an NNAPI device feature level
2245  * (the most advanced NNAPI specification and features that the driver implements).
2246  * This function expresses NNAPI runtime feature level, which indicates the most advanced
2247  * NNAPI specification and features the runtime implements. An NNAPI device feature level is
2248  * always less than or equal to the runtime feature level.
2249  *
2250  * This function returns a {@link FeatureLevelCode} enum value,
2251  * which is the NNAPI specification version that this NNAPI runtime implements.
2252  * It is NOT an Android API level.
2253  *
2254  * Available since NNAPI feature level 5.
2255  * @deprecated NNAPI is deprecated. See
2256  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2257  *   for more details.
2258  */
2259 int64_t ANeuralNetworks_getRuntimeFeatureLevel() __NNAPI_INTRODUCED_IN(31)
2260         __NNAPI_DEPRECATED_IN(35);
2261 
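/*
 * Example usage (an illustrative sketch, extending the snippet in the documentation above):
 * gating a feature level 5 API on the discovered feature level.
 *
 *     int device_api_level = android_get_device_api_level();
 *     int64_t feature_level = (device_api_level < __ANDROID_API_S__)
 *             ? device_api_level
 *             : ANeuralNetworks_getRuntimeFeatureLevel();
 *     if (feature_level >= ANEURALNETWORKS_FEATURE_LEVEL_5) {
 *         // Feature level 5 APIs, e.g. ANeuralNetworksExecution_setReusable, may be used.
 *     }
 */
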
2262 /**
2263  * Specifies whether the {@link ANeuralNetworksExecution} is able to accept padded input and output
2264  * buffers and memory objects.
2265  *
2266  * By default, the input and output buffers and memory objects of {@link ANeuralNetworksExecution}
2267  * do not allow padding.
2268  *
2269  * Setting the execution to accept padded input and output buffers and memory objects enables the
2270  * length argument of {@link ANeuralNetworksExecution_setInput},
2271  * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput},
2272  * and {@link ANeuralNetworksExecution_setOutputFromMemory} to be greater than the raw size of the
2273  * operand (i.e. the size of an element multiplied by the number of elements). The extra bytes
2274  * at the end of the buffer or memory region may be used by the driver to access data in chunks,
2275  * for efficiency.
2276  *
2277  * This method must not be called after {@link ANeuralNetworksExecution_setInput},
2278  * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput},
2279  * or {@link ANeuralNetworksExecution_setOutputFromMemory}.
2280  *
2281  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2282  *
2283  * @param execution The execution to be modified.
2284  * @param enable 'true' if the execution is to be able to accept padded input and output buffers
2285  *               and memory objects, 'false' if not.
2286  *
2287  * @return ANEURALNETWORKS_NO_ERROR if successful.
2288  *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2289  *         ANEURALNETWORKS_BAD_STATE if {@link ANeuralNetworksExecution_setInput},
2290  *         {@link ANeuralNetworksExecution_setInputFromMemory},
2291  *         {@link ANeuralNetworksExecution_setOutput}, or
2292  *         {@link ANeuralNetworksExecution_setOutputFromMemory} has been called on the execution.
2293  *
2294  * Available since NNAPI feature level 5.
2295  * @deprecated NNAPI is deprecated. See
2296  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2297  *   for more details.
2298  */
2299 int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution,
2300                                                          bool enable) __NNAPI_INTRODUCED_IN(31)
2301         __NNAPI_DEPRECATED_IN(35);
2302 
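/*
 * Example usage (an illustrative sketch; "execution", "input_data" and "padded_length" are
 * placeholders): padding must be enabled before any input or output is set, after which the
 * length passed to the set* calls may exceed the raw operand size.
 *
 *     ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true);
 *     // ... later, input_data points to a buffer of at least padded_length bytes, where
 *     // padded_length may be larger than the raw size of input 0 ...
 *     ANeuralNetworksExecution_setInput(execution, 0, NULL, input_data, padded_length);
 */
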
2303 /**
2304  * Get the preferred buffer and memory alignment of an input to an execution created from a
2305  * particular compilation.
2306  *
2307  * The user may use the returned alignment value to guide the layout of the input buffer or memory
2308  * pool. To achieve the best performance, make sure the address of the buffer passed in
2309  * {@link ANeuralNetworksExecution_setInput}, or the offset value passed in
2310  * {@link ANeuralNetworksExecution_setInputFromMemory}, is a multiple of the preferred alignment
2311  * value of the same input. A driver may choose to allocate a separate buffer and do memory copying
2312  * if the provided buffer or memory does not satisfy the preferred alignment.
2313  *
2314  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2315  *
2316  * @param compilation The compilation object. It must already have been finished by calling
2317  *                    {@link ANeuralNetworksCompilation_finish}.
2318  * @param index The index of the input argument we are referencing from the compilation. It is
2319  *              an index into the inputs list passed to
2320  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2321  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2322  * @param alignment The returned preferred alignment in bytes. It will be a power of 2.
2323  *
2324  * @return ANEURALNETWORKS_NO_ERROR if successful.
2325  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
2326  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2327  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2328  *
2329  * Available since NNAPI feature level 5.
2330  * @deprecated NNAPI is deprecated. See
2331  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2332  *   for more details.
2333  */
2334 int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
2335         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
2336         __NNAPI_INTRODUCED_IN(31) __NNAPI_DEPRECATED_IN(35);
2337 
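/*
 * Example usage (an illustrative sketch; "compilation" and "input_raw_size" are placeholders):
 * allocating an input buffer whose address honors the preferred alignment. aligned_alloc
 * requires the size to be a multiple of the alignment, so the size is rounded up here.
 *
 *     #include <stdlib.h>  // for aligned_alloc()/free()
 *
 *     uint32_t alignment = 0;
 *     ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(compilation, 0, &alignment);
 *     size_t size = (input_raw_size + alignment - 1) & ~(size_t)(alignment - 1);
 *     void* input_buffer = aligned_alloc(alignment, size);
 */
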
2338 /**
2339  * Get the preferred buffer and memory end padding of an input to an execution created from a
2340  * particular compilation.
2341  *
2342  * The user may use the returned padding value to guide the layout of the input buffer or memory
2343  * pool. To achieve the best performance, make sure the length value passed in
2344  * {@link ANeuralNetworksExecution_setInput} or
2345  * {@link ANeuralNetworksExecution_setInputFromMemory} is greater than or equal to the raw size of
2346  * the input (i.e. the size of an element multiplied by the number of elements) rounded up to
2347  * a multiple of the preferred padding value of the same input. A driver may choose to allocate a
2348  * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
2349  * the preferred padding.
2350  *
2351  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2352  * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
2353  * {@link ANeuralNetworksExecution_setInput}, and
2354  * {@link ANeuralNetworksExecution_setInputFromMemory} for information on passing
2355  * input buffer or memory padding to the driver.
2356  *
2357  * @param compilation The compilation object. It must already have been finished by calling
2358  *                    {@link ANeuralNetworksCompilation_finish}.
2359  * @param index The index of the input argument we are referencing from the compilation. It is
2360  *              an index into the inputs list passed to
2361  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2362  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2363  * @param padding The returned preferred padding in bytes. It will be a power of 2.
2364  *
2365  * @return ANEURALNETWORKS_NO_ERROR if successful.
2366  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
2367  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2368  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2369  *
2370  * Available since NNAPI feature level 5.
2371  * @deprecated NNAPI is deprecated. See
2372  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2373  *   for more details.
2374  */
2375 int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
2376         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
2377         __NNAPI_INTRODUCED_IN(31) __NNAPI_DEPRECATED_IN(35);
2378 
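/*
 * Example usage (an illustrative sketch; "execution", "compilation", "input_buffer" and
 * "input_raw_size" are placeholders): rounding the length passed to
 * ANeuralNetworksExecution_setInput up to the preferred padding. This assumes padding was
 * enabled with ANeuralNetworksExecution_enableInputAndOutputPadding and that input_buffer
 * really is at least padded_length bytes long.
 *
 *     uint32_t padding = 0;
 *     ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(compilation, 0, &padding);
 *     size_t padded_length = (input_raw_size + padding - 1) & ~(size_t)(padding - 1);
 *     ANeuralNetworksExecution_setInput(execution, 0, NULL, input_buffer, padded_length);
 */
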
2379 /**
2380  * Get the preferred buffer and memory alignment of an output to an execution created from a
2381  * particular compilation.
2382  *
2383  * The user may use the returned alignment value to guide the layout of the output buffer or memory
2384  * pool. To achieve the best performance, make sure the address of the buffer passed in
2385  * {@link ANeuralNetworksExecution_setOutput}, or the offset value passed in
2386  * {@link ANeuralNetworksExecution_setOutputFromMemory}, is a multiple of the preferred alignment
2387  * value of the same output. A driver may choose to allocate a separate buffer and do memory copying
2388  * if the provided buffer or memory does not satisfy the preferred alignment.
2389  *
2390  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2391  *
2392  * @param compilation The compilation object. It must already have been finished by calling
2393  *                    {@link ANeuralNetworksCompilation_finish}.
2394  * @param index The index of the output argument we are referencing from the compilation. It is
2395  *              an index into the outputs list passed to
2396  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2397  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2398  * @param alignment The returned preferred alignment in bytes. It will be a power of 2.
2399  *
2400  * @return ANEURALNETWORKS_NO_ERROR if successful.
2401  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
2402  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2403  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2404  *
2405  * Available since NNAPI feature level 5.
2406  * @deprecated NNAPI is deprecated. See
2407  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2408  *   for more details.
2409  */
2410 int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
2411         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
2412         __NNAPI_INTRODUCED_IN(31) __NNAPI_DEPRECATED_IN(35);
2413 
2414 /**
2415  * Get the preferred buffer and memory end padding of an output to an execution created from a
2416  * particular compilation.
2417  *
2418  * The user may use the returned padding value to guide the layout of the output buffer or memory
2419  * pool. To achieve the best performance, make sure the length value passed in
2420  * {@link ANeuralNetworksExecution_setOutput} or
2421  * {@link ANeuralNetworksExecution_setOutputFromMemory} is greater than or equal to the raw size of
2422  * the output (i.e. the size of an element multiplied by the number of elements) rounded up to
2423  * a multiple of the preferred padding value of the same output. A driver may choose to allocate a
2424  * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
2425  * the preferred padding.
2426  *
2427  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2428  * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
2429  * {@link ANeuralNetworksExecution_setOutput}, and
2430  * {@link ANeuralNetworksExecution_setOutputFromMemory} for information on passing
2431  * output buffer or memory padding to the driver.
2432  *
2433  * @param compilation The compilation object. It must already have been finished by calling
2434  *                    {@link ANeuralNetworksCompilation_finish}.
2435  * @param index The index of the output argument we are referencing from the compilation. It is
2436  *              an index into the outputs list passed to
2437  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2438  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2439  * @param padding The returned preferred padding in bytes. It will be a power of 2.
2440  *
2441  * @return ANEURALNETWORKS_NO_ERROR if successful.
2442  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
2443  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2444  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2445  *
2446  * Available since NNAPI feature level 5.
2447  * @deprecated NNAPI is deprecated. See
2448  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2449  *   for more details.
2450  */
2451 int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
2452         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
2453         __NNAPI_INTRODUCED_IN(31) __NNAPI_DEPRECATED_IN(35);
2454 
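/*
 * Example usage (an illustrative sketch covering both output queries above; "execution",
 * "compilation" and "output_raw_size" are placeholders): allocating an output buffer that
 * satisfies the preferred alignment and passing a length rounded up to the preferred padding
 * (the latter requires ANeuralNetworksExecution_enableInputAndOutputPadding).
 *
 *     uint32_t alignment = 0, padding = 0;
 *     ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(compilation, 0, &alignment);
 *     ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(compilation, 0, &padding);
 *
 *     size_t padded_length = (output_raw_size + padding - 1) & ~(size_t)(padding - 1);
 *     // aligned_alloc needs the size to be a multiple of the alignment as well.
 *     size_t alloc_size = (padded_length + alignment - 1) & ~(size_t)(alignment - 1);
 *     void* output_buffer = aligned_alloc(alignment, alloc_size);
 *     ANeuralNetworksExecution_setOutput(execution, 0, NULL, output_buffer, padded_length);
 */
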
2455 /**
2456  * Specifies whether the {@link ANeuralNetworksExecution} can be reused for multiple computations.
2457  *
2458  * By default, the {@link ANeuralNetworksExecution} is not reusable.
2459  *
2460  * Setting the execution to be reusable enables multiple computations to be scheduled and evaluated
2461  * on the same execution sequentially, either by means of
2462  * {@link ANeuralNetworksExecution_burstCompute}, {@link ANeuralNetworksExecution_compute},
2463  * {@link ANeuralNetworksExecution_startCompute} or
2464  * {@link ANeuralNetworksExecution_startComputeWithDependencies}: The application may schedule and
2465  * evaluate a computation again from the completed state of a reusable execution.
2466  *
2467  * This function may only be invoked when the execution is in the preparation state.
2468  *
2469  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
2470  *
2471  * @param execution The execution to be modified.
2472  * @param reusable 'true' if the execution is to be reusable, 'false' if not.
2473  *
2474  * @return ANEURALNETWORKS_NO_ERROR if successful.
2475  *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2476  *         ANEURALNETWORKS_BAD_STATE if the execution is not in the preparation state.
2477  *
2478  * Available since NNAPI feature level 5.
2479  * @deprecated NNAPI is deprecated. See
2480  *   <a href="https://developer.android.com/ndk/guides/neuralnetworks">this webpage</a>
2481  *   for more details.
2482  */
2483 int ANeuralNetworksExecution_setReusable(ANeuralNetworksExecution* execution, bool reusable)
2484         __NNAPI_INTRODUCED_IN(31) __NNAPI_DEPRECATED_IN(35);
2485 
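/*
 * Example usage (an illustrative sketch; "compilation" and the input/output setup are
 * placeholders): marking an execution as reusable while it is still in the preparation state,
 * then computing twice with the same execution object.
 *
 *     ANeuralNetworksExecution* execution = NULL;
 *     ANeuralNetworksExecution_create(compilation, &execution);
 *     ANeuralNetworksExecution_setReusable(execution, true);
 *     // ... set inputs and outputs ...
 *     ANeuralNetworksExecution_compute(execution);  // first computation
 *     // ... consume the outputs; the same bindings are used for the next computation ...
 *     ANeuralNetworksExecution_compute(execution);  // second computation, reusing the execution
 *     ANeuralNetworksExecution_free(execution);
 */
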
2486 __END_DECLS
2487 
2488 #endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_H
2489 
2490 #undef __NNAPI_INTRODUCED_IN
#undef __NNAPI_DEPRECATED_IN
2491 
2492 /** @} */
2493