1%% template file for generating NeuralNetworksTypes.h. 2%% see README.md. 3/* 4 * Copyright (C) 2017 The Android Open Source Project 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 */ 18 19/** 20 * @addtogroup NeuralNetworks 21 * @{ 22 */ 23 24/** 25 * @file NeuralNetworksTypes.h 26 */ 27 28#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_TYPES_H 29#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_TYPES_H 30 31/****************************************************************** 32 * 33 * IMPORTANT NOTICE: 34 * 35 * This file is part of Android's set of stable system headers 36 * exposed by the Android NDK (Native Development Kit). 37 * 38 * Third-party source AND binary code relies on the definitions 39 * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. 
 *
 *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
 *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
 *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
 *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
 */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/cdefs.h>

#ifdef __ANDROID__
#include <android/hardware_buffer.h>
#endif  // __ANDROID__

__BEGIN_DECLS

%insert Operand_1.0_Comment
typedef enum {
%insert Operand_1.0
%insert Operand_1.2
%insert Operand_1.3
} OperandCode;

%insert Operation_1.0_Comment
typedef enum {
    // Operations below are available since NNAPI feature level 1.

%insert Operation_1.0

    // Operations below are available since NNAPI feature level 2.

%insert Operation_1.1

    // Operations below are available since NNAPI feature level 3.

%insert Operation_1.2

    // Operations below are available since NNAPI feature level 4.

%insert Operation_1.3

    // Operations below are available since NNAPI feature level 6.

%insert Operation_fl6

    // Operations below are available since NNAPI feature level 7.

%insert Operation_fl7
} OperationCode;

%insert FusedActivationFunc

/**
 * Implicit padding algorithms.
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
    /**
     * SAME padding.
     * Padding on both ends are the "same":
     *     padding_to_beginning = total_padding / 2
     *     padding_to_end = (total_padding + 1)/2.
     * i.e., for even number of padding, padding to both ends are exactly
     * the same; for odd number of padding, padding to the ending is bigger
     * than the padding to the beginning by 1.
     *
     * total_padding is a function of input, stride, dilation and filter size.
     * It could be computed as follows:
     *     out_size = (input + stride - 1) / stride
     *     effective_filter_size = (filter_size - 1) * dilation + 1
     *     needed_input = (out_size - 1) * stride + effective_filter_size
     *     total_padding = max(0, needed_input - input_size)
     * The computation is the same for the horizontal and vertical directions.
     */
    ANEURALNETWORKS_PADDING_SAME = 1,

    /**
     * VALID padding.
     * No padding. When the input size is not evenly divisible by
     * the filter size, the input at the end that could not fill
     * the whole filter tile will simply be ignored.
     */
    ANEURALNETWORKS_PADDING_VALID = 2,
} PaddingCode;

%insert ExecutionPreference

%insert DeviceType

/**
 * NNAPI feature levels.
 *
 * Each update of the NNAPI specification yields a new NNAPI feature level enum value.
 * NNAPI feature level corresponds to an NNAPI specification version that a driver
 * and/or the NNAPI runtime can implement.
 *
 * A feature level up to and including "FEATURE_LEVEL_5" maps directly to
 * the Android API level that introduced the corresponding update of the NNAPI
 * specification. Feature levels after Android API level 31 have no association with
 * API level because the NNAPI specification can be updated between Android API
 * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and
 * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against
 * these enum values instead of the Android API level.
 */
typedef enum {
    /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */
    ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
    /** NNAPI specification available in Android P, Android NNAPI feature level 2 */
    ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
    /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */
    ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
    /** NNAPI specification available in Android R, Android NNAPI feature level 4 */
    ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
    /**
     * NNAPI specification available in Android S, Android NNAPI feature level 5.
     * After Android S, the NNAPI specification can be updated between Android
     * API releases.
     */
    ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
    /** Android NNAPI feature level 6 */
    ANEURALNETWORKS_FEATURE_LEVEL_6 = 1000006,
    /** Android NNAPI feature level 7 */
    ANEURALNETWORKS_FEATURE_LEVEL_7 = 1000007,
    /** Android NNAPI feature level 8 */
    ANEURALNETWORKS_FEATURE_LEVEL_8 = 1000008,
} FeatureLevelCode;

/**
 * Result codes.
 *
 * <p>Any NNAPI function can return any result code, including result codes not
 * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
 * indicates a failure of some kind.</p>
 *
 * <p>Additional information about the nature of a failure can be obtained from
 * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
 * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
    /**
     * Operation was successful.
     */
    ANEURALNETWORKS_NO_ERROR = 0,

    /**
     * Failure caused by not enough available memory.
     */
    ANEURALNETWORKS_OUT_OF_MEMORY = 1,

    /**
     * NOTE(review): this value was previously undocumented. The name suggests
     * an operation has not yet finished — confirm exact semantics against the
     * NNAPI specification before relying on this description.
     */
    ANEURALNETWORKS_INCOMPLETE = 2,

    /**
     * Failure caused by unexpected null argument.
199 */ 200 ANEURALNETWORKS_UNEXPECTED_NULL = 3, 201 202 /** 203 * Failure caused by invalid function arguments, invalid model definition, 204 * invalid execution definition or invalid data at execution time. 205 */ 206 ANEURALNETWORKS_BAD_DATA = 4, 207 208 /** 209 * Failure caused by failed model execution. 210 */ 211 ANEURALNETWORKS_OP_FAILED = 5, 212 213 /** 214 * Failure caused by object being in the wrong state. 215 */ 216 ANEURALNETWORKS_BAD_STATE = 6, 217 218 /** 219 * Failure caused by not being able to map a file into memory. 220 * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer 221 * not supported by the device. 222 * Mitigate by reading its content into memory. 223 */ 224 ANEURALNETWORKS_UNMAPPABLE = 7, 225 226 /** 227 * Failure caused by insufficient buffer size provided to a model output. 228 */ 229 ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8, 230 231 /** 232 * Failure caused by a device not being available. 233 */ 234 ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9, 235 236 /** 237 * Failure because a deadline could not be met for a task, but future 238 * deadlines may still be met for the same task after a short delay. 239 * 240 * Available since NNAPI feature level 4. 241 */ 242 ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10, 243 244 /** 245 * Failure because a deadline could not be met for a task, and future 246 * deadlines will likely also not be met for the same task even after a 247 * short delay. 248 * 249 * Available since NNAPI feature level 4. 250 */ 251 ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11, 252 253 /** 254 * Failure because of a resource limitation within the driver, but future 255 * calls for the same task may still succeed after a short delay. 256 * 257 * Available since NNAPI feature level 4. 
     */
    ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,

    /**
     * Failure because of a resource limitation within the driver, and future
     * calls for the same task will likely also fail even after a short
     * delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,

    /**
     * Failure indicating an object is in a dead state.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_DEAD_OBJECT = 14,
} ResultCode;

/**
 * For {@link ANeuralNetworksModel_setOperandValue}, values with a
 * length smaller or equal to this will be immediately copied into
 * the model. The size is in bytes.
 *
 * Available since NNAPI feature level 1.
 */
enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };

/**
 * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
 * of the cache token required from the application. The size is in bytes.
 *
 * Available since NNAPI feature level 3.
 */
enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };

/**
 * Different duration measurements.
 *
 * Durations are measured in nanoseconds.
 *
 * Available since NNAPI feature level 3.
 */
typedef enum {
    /** Execution time on hardware (not driver, which runs on host processor). */
    ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
    /**
     * Execution time in driver (including time on hardware). Excludes overhead
     * such as that of the runtime itself and the IPC needed for the runtime to
     * communicate with the driver.
     */
    ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
    /**
     * Execution time on hardware, after all dependencies have been signaled.
     * If no dependencies specified (for example, if the execution was scheduled other
     * than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
     * reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
    /**
     * Execution time in driver, after all dependencies have been signaled. Excludes
     * overhead such as that of the runtime itself and the IPC needed for the runtime
     * to communicate with the driver.
     * If no dependencies specified (for example, if the execution was scheduled other
     * than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
     * reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;

%insert Priority

/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every constant tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
 * create shared memory from a file handle.
 * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
 * create shared memory from an AHardwareBuffer handle.
 *
 * Memory objects can also be used to specify the input and output arguments of
 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
345 * 346 * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory}, 347 * {@link ANeuralNetworksExecution_setInputFromMemory} and 348 * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared 349 * memory object must be aligned on a boundary of a byte size that is a multiple 350 * of the element type byte size, e.g., a tensor with 351 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary. 352 * 353 * It is the application's responsibility to ensure that there are no uses of 354 * the memory after calling {@link ANeuralNetworksMemory_free}. This includes 355 * any model which references this memory because of a call to 356 * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation 357 * created using such a model, any execution object or burst object created 358 * using such a compilation, or any execution which references this memory 359 * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or 360 * {@link ANeuralNetworksExecution_setOutputFromMemory}. 361 * 362 * Available since NNAPI feature level 1. 363 * 364 * Starting at NNAPI feature level 4, the application may request creation of device native memory 365 * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation 366 * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and 367 * {@link ANeuralNetworksMemory_createFromDesc}. 368 */ 369typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; 370 371/** 372 * ANeuralNetworksModel is an opaque type that contains a description of the 373 * mathematical operations that constitute the model. 
374 * 375 * <p>Build the model by calling<ul> 376 * <li>{@link ANeuralNetworksModel_create}</li> 377 * <li>{@link ANeuralNetworksModel_addOperation}</li> 378 * <li>{@link ANeuralNetworksModel_addOperand}</li> 379 * </ul> 380 * 381 * This forms a graph in which each operation and operand is a node, a 382 * directed edge from an operand to an operation indicates that the 383 * operand is an input to the operation, and a directed edge from an 384 * operation to an operand indicates that the operand is an output 385 * from the operation. This graph must be acyclic. 386 * 387 * A model is completed by calling {@link ANeuralNetworksModel_finish}. 388 * A model is destroyed by calling {@link ANeuralNetworksModel_free}. 389 * 390 * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} 391 * has been called on it.</p> 392 * 393 * <p>It is the application's responsibility to make sure that only one thread 394 * modifies a model at a given time. It is however safe for more than one 395 * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> 396 * 397 * <p>It is also the application's responsibility to ensure that there are no 398 * other uses of the model after calling {@link ANeuralNetworksModel_free}. 399 * This includes any compilation, execution object or burst object created using 400 * the model.</p> 401 * 402 * Available since NNAPI feature level 1. 403 */ 404typedef struct ANeuralNetworksModel ANeuralNetworksModel; 405 406/** 407 * ANeuralNetworksCompilation is an opaque type that can be used to compile 408 * a machine learning model. 
409 * 410 * <p>To use:<ul> 411 * <li>Create a new compilation instance by calling the 412 * {@link ANeuralNetworksCompilation_create} function or 413 * {@link ANeuralNetworksCompilation_createForDevices}.</li> 414 * <li>Set any desired properties on the compilation (for example, 415 * {@link ANeuralNetworksCompilation_setPreference}).</li> 416 * <li>Optionally, set the caching signature and the cache directory on the 417 * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> 418 * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> 419 * <li>Use the compilation as many times as needed 420 * with {@link ANeuralNetworksExecution_create} and 421 * {@link ANeuralNetworksBurst_create}.</li> 422 * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} 423 * once all executions using the compilation have completed.</li></ul></p> 424 * 425 * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. 426 * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. 427 * 428 * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} 429 * has been called on it.</p> 430 * 431 * <p>It is the application's responsibility to make sure that only 432 * one thread modifies a compilation at a given time. It is however 433 * safe for more than one thread to use the compilation once 434 * {@link ANeuralNetworksCompilation_finish} has returned.</p> 435 * 436 * <p>It is also the application's responsibility to ensure that there are no other 437 * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. 
438 * This includes any execution object or burst object created using the compilation, 439 * or any memory descriptor with the compilation as part of one of the roles specified by 440 * {@link ANeuralNetworksMemoryDesc_addInputRole} or 441 * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p> 442 * 443 * Available since NNAPI feature level 1. 444 */ 445typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; 446 447/** 448 * ANeuralNetworksExecution is an opaque type that can be used to apply a machine 449 * learning model to a set of inputs. 450 * 451 * <p>To use:<ul> 452 * <li>Create a new execution instance by calling the 453 * {@link ANeuralNetworksExecution_create} function.</li> 454 * <li>Associate input buffers or memory regions to the model inputs with 455 * {@link ANeuralNetworksExecution_setInput} or 456 * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> 457 * <li>Associate output buffers or memory regions to the model outputs with 458 * {@link ANeuralNetworksExecution_setOutput} or 459 * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> 460 * <li>Optionally, configure the execution with 461 * {@link ANeuralNetworksExecution_setLoopTimeout}, 462 * {@link ANeuralNetworksExecution_setMeasureTiming}, 463 * {@link ANeuralNetworksExecution_setReusable}, or 464 * {@link ANeuralNetworksExecution_setTimeout}. 465 * <li>Apply the model with one of the following:</li><ul> 466 * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute} 467 * or with {@link ANeuralNetworksExecution_startComputeWithDependencies}, 468 * waiting for the execution to complete with 469 * {@link ANeuralNetworksEvent_wait}.</li> 470 * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li> 471 * <li>Synchronously as part of an execution burst with 472 * {@link ANeuralNetworksExecution_burstCompute}.</li></ul> 473 * If the execution has been marked as reusable, then you can 474 * apply the model more than once. 
475 * <li>Destroy the execution with 476 * {@link ANeuralNetworksExecution_free}.</li></ul></p> 477 * 478 * <p>An output buffer or memory region must not overlap with any 479 * other output buffer or memory region, with an input buffer or 480 * memory region, or with an operand value in a memory object 481 * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p> 482 * 483 * <p>An execution is in the preparation state after it is created by 484 * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation 485 * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute}, 486 * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute}, 487 * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of 488 * the execution object to the computation state. When the computation completes, the state of 489 * the execution object will change from the computation state to the completed state. 490 * The computation is completed when {@link ANeuralNetworksExecution_compute}, 491 * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait} 492 * has returned.</p> 493 * 494 * <p>An execution can be applied to a model with 495 * {@link ANeuralNetworksExecution_burstCompute}, 496 * {@link ANeuralNetworksExecution_compute}, 497 * {@link ANeuralNetworksExecution_startCompute} or 498 * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new 499 * executions to do new evaluations of the model.</p> 500 * 501 * <p>Starting at NNAPI feature level 5, the application may call 502 * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple 503 * computations. The application may schedule and evaluate a computation again from the completed 504 * state of a reusable execution. 
The execution cannot be modified between computations.</p> 505 * 506 * <p>It is the application's responsibility to make sure that only one thread 507 * modifies an execution at a given time. It is however safe for more than one 508 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> 509 * 510 * <p>It is also the application's responsibility to ensure that the execution 511 * either has never been scheduled or has completed (i.e., that 512 * {@link ANeuralNetworksExecution_burstCompute}, 513 * {@link ANeuralNetworksExecution_compute}, or 514 * {@link ANeuralNetworksEvent_wait} has returned) before calling 515 * {@link ANeuralNetworksExecution_free}.</p>. 516 * 517 * <p>It is also the application's responsibility to ensure that there are no other 518 * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p> 519 * 520 * <p>It is the application's responsibility to ensure that there are no concurrent computations 521 * scheduled and evaluated on the same execution, either by means of 522 * {@link ANeuralNetworksExecution_compute} or 523 * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) 524 * in different threads, or by means of 525 * {@link ANeuralNetworksExecution_startCompute} or 526 * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous). 527 * It is however safe to schedule and evaluate multiple computations on different executions 528 * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on 529 * different burst objects.) The runtime makes no guarantee on the ordering of 530 * completion of executions. 
If it's important to the application, the 531 * application should enforce the ordering by ensuring that one execution 532 * completes before the next is scheduled (for example, by scheduling all 533 * executions synchronously within a single thread, or by scheduling all 534 * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between 535 * calls to {@link ANeuralNetworksExecution_startCompute}); or by using 536 * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a 537 * list of events to be signaled before starting the actual evaluation.</p> 538 * 539 * Available since NNAPI feature level 1. 540 */ 541typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; 542 543%insert SymmPerChannelQuantParams_Comment 544typedef struct ANeuralNetworksSymmPerChannelQuantParams { 545 /** The index of the channel dimension. */ 546 uint32_t channelDim; 547 /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */ 548 uint32_t scaleCount; 549 /** The array of scaling values for each channel. Each value must be greater than zero. */ 550 const float* scales; 551} ANeuralNetworksSymmPerChannelQuantParams; 552 553/** 554 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency 555 * of a rapid sequence of executions. It will likely cause overhead if only used 556 * for a single execution. 557 * 558 * ANeuralNetworksBurst serves as a context object for any number of inferences 559 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst 560 * object and the {@link ANeuralNetworksExecution} objects used with it must all 561 * have been created from the same {@link ANeuralNetworksCompilation} object. 562 * 563 * This object is also used as a hint to drivers, providing insight to the 564 * lifetime of a rapid sequence of executions. 
For example, a driver may choose 565 * to increase the clock frequency of its accelerator for the lifetime of a 566 * burst object. 567 * 568 * <p>To use:<ul> 569 * <li>Create a new burst object by calling the 570 * {@link ANeuralNetworksBurst_create} function.</li> 571 * <li>For each execution:</li><ul> 572 * <li>Create {@link ANeuralNetworksExecution} and configure its 573 * properties (see {@link ANeuralNetworksExecution} for details).</li> 574 * <li>Apply the model synchronously with 575 * {@link ANeuralNetworksExecution_burstCompute}, reusing the same 576 * {@link ANeuralNetworksBurst} with the new 577 * {@link ANeuralNetworksExecution}.</li> 578 * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul> 579 * <li>Destroy the burst with 580 * {@link ANeuralNetworksBurst_free}.</li></ul></p> 581 * 582 * Available since NNAPI feature level 3. 583 */ 584typedef struct ANeuralNetworksBurst ANeuralNetworksBurst; 585 586/** 587 * ANeuralNetworksOperandType describes the type of an operand. 588 * 589 * This structure is used to describe both scalars and tensors. 590 * 591 * A tensor operand type with all dimensions specified is "fully 592 * specified". Whenever possible (i.e., whenever the dimensions are 593 * known at model construction time), a tensor operand type should be 594 * (but is not required to be) fully specified, in order to enable the 595 * best possible performance. 596 * 597 * If a tensor operand's type is not fully specified, the dimensions 598 * of the operand are deduced from the operand types and values of the 599 * operation for which that operand is an output or from the corresponding 600 * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input 601 * operand type in the case of referenced model input operands. 
602 * 603 * <p>In the following situations, a tensor operand type must be fully 604 * specified:<ul> 605 * <li>The operand has a constant value, set by 606 * {@link ANeuralNetworksModel_setOperandValue} (with a 607 * non-nullptr buffer) or 608 * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> 609 * <li>The operand is a model input (see 610 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main 611 * model within a compilation. A fully specified tensor operand type 612 * must either be provided to {@link ANeuralNetworksModel_addOperand}; 613 * or it must be provided to the corresponding 614 * {@link ANeuralNetworksExecution_setInput}, or 615 * {@link ANeuralNetworksExecution_setInputFromMemory}. 616 * EXCEPTION: If the input is optional and omitted 617 * (by passing nullptr for buffer to 618 * {@link ANeuralNetworksExecution_setInput}) then it need 619 * not have a fully specified tensor operand type.</li> 620 * <li>The operand is a model output (see 621 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main 622 * model within a compilation and is to be used with {@link 623 * ANeuralNetworksExecution_startComputeWithDependencies}. 624 * A fully specified tensor operand type must either be provided 625 * to {@link ANeuralNetworksModel_addOperand}; or it must be 626 * provided to the corresponding 627 * {@link ANeuralNetworksExecution_setOutput}, or 628 * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul> 629 * 630 * A tensor operand type of specified rank but some number of 631 * unspecified dimensions is represented by setting dimensionCount to 632 * the rank and each unspecified dimension to 0. 633 * 634 * Available since NNAPI feature level 1. 635 * 636 * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is 637 * represented by setting dimensionCount to 0 and dimensions to NULL (just as if 638 * it were a scalar operand type). 
 */
typedef struct ANeuralNetworksOperandType {
    /**
     * The data type, e.g., ANEURALNETWORKS_FLOAT32.
     */
    int32_t type;

    /**
     * The number of dimensions (rank).
     *
     * Must be 0 for scalars.
     */
    uint32_t dimensionCount;

    /**
     * The dimensions of the tensor.
     *
     * Must be nullptr for scalars.
     */
    const uint32_t* dimensions;

    /**
     * The quantization scale.
     *
     * Must be 0 when not applicable to an operand type.
     *
     * See {@link OperandCode}.
     */
    float scale;

    /**
     * The quantization zero point.
     *
     * Must be 0 when not applicable to an operand type.
     *
     * See {@link OperandCode}.
     */
    int32_t zeroPoint;
} ANeuralNetworksOperandType;

/**
 * Aliasing to {@link OperationCode}, used in function
 * {@link ANeuralNetworksModel_addOperation}.
 */
typedef int32_t ANeuralNetworksOperationType;

/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;

/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the corresponding
 * device, and control which device(s) a model is to be run on.
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;

/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is used by
 * {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}.
711 * - Specify all of the intended input and output roles by calling 712 * {@link ANeuralNetworksMemoryDesc_addInputRole} and 713 * {@link ANeuralNetworksMemoryDesc_addOutputRole}. 714 * - Optionally, specify the memory dimensions by calling 715 * {@link ANeuralNetworksMemoryDesc_setDimensions}. 716 * - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}. 717 * - Use the memory descriptor as many times as needed with 718 * {@link ANeuralNetworksMemory_createFromDesc}. 719 * - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}. 720 * 721 * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}. 722 * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}. 723 * 724 * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish} 725 * has been called on it. 726 * 727 * It is the application's responsibility to make sure that only 728 * one thread modifies a memory descriptor at a given time. It is however 729 * safe for more than one thread to use the memory descriptor once 730 * {@link ANeuralNetworksMemoryDesc_finish} has returned. 731 * 732 * It is also the application's responsibility to ensure that there are no other 733 * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}. 734 * It is however safe to continue using a {@link ANeuralNetworksMemory} object created 735 * from the memory descriptor. 736 * 737 * Available since NNAPI feature level 4. 738 */ 739typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc; 740 741__END_DECLS 742 743#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_TYPES_H 744 745/** @} */ 746