/*
 * This file is auto-generated.  DO NOT MODIFY.
 * Using: out/host/linux-x86/bin/aidl --lang=ndk --structured --version 4 --hash 53178f8de9b8861df391cf0593f6f3e08adad33d -t --stability vintf --min_sdk_version 30 -pout/soong/.intermediates/hardware/interfaces/common/aidl/android.hardware.common_interface/2/preprocessed.aidl -pout/soong/.intermediates/hardware/interfaces/graphics/common/aidl/android.hardware.graphics.common_interface/6/preprocessed.aidl --ninja -d out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/staging/android/hardware/neuralnetworks/OperandType.cpp.d -h out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/include/staging -o out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/staging -Nhardware/interfaces/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/4 hardware/interfaces/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/4/android/hardware/neuralnetworks/OperandType.aidl
 *
 * DO NOT CHECK THIS FILE INTO A CODE TREE (e.g. git, etc..).
 * ALWAYS GENERATE THIS FILE FROM UPDATED AIDL COMPILER
 * AS A BUILD INTERMEDIATE ONLY. THIS IS NOT SOURCE CODE.
 */
#pragma once

#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <android/binder_enums.h>
#ifdef BINDER_STABILITY_SUPPORT
#include <android/binder_stability.h>
#endif  // BINDER_STABILITY_SUPPORT

namespace aidl {
namespace android {
namespace hardware {
namespace neuralnetworks {
enum class OperandType : int32_t {
  FLOAT32 = 0,
  INT32 = 1,
  UINT32 = 2,
  TENSOR_FLOAT32 = 3,
  TENSOR_INT32 = 4,
  TENSOR_QUANT8_ASYMM = 5,
  BOOL = 6,
  TENSOR_QUANT16_SYMM = 7,
  TENSOR_FLOAT16 = 8,
  TENSOR_BOOL8 = 9,
  FLOAT16 = 10,
  TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
  TENSOR_QUANT16_ASYMM = 12,
  TENSOR_QUANT8_SYMM = 13,
  TENSOR_QUANT8_ASYMM_SIGNED = 14,
  SUBGRAPH = 15,
};

}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
}  // namespace aidl
namespace aidl {
namespace android {
namespace hardware {
namespace neuralnetworks {
[[nodiscard]] static inline std::string toString(OperandType val) {
  switch(val) {
  case OperandType::FLOAT32:
    return "FLOAT32";
  case OperandType::INT32:
    return "INT32";
  case OperandType::UINT32:
    return "UINT32";
  case OperandType::TENSOR_FLOAT32:
    return "TENSOR_FLOAT32";
  case OperandType::TENSOR_INT32:
    return "TENSOR_INT32";
  case OperandType::TENSOR_QUANT8_ASYMM:
    return "TENSOR_QUANT8_ASYMM";
  case OperandType::BOOL:
    return "BOOL";
  case OperandType::TENSOR_QUANT16_SYMM:
    return "TENSOR_QUANT16_SYMM";
  case OperandType::TENSOR_FLOAT16:
    return "TENSOR_FLOAT16";
  case OperandType::TENSOR_BOOL8:
    return "TENSOR_BOOL8";
  case OperandType::FLOAT16:
    return "FLOAT16";
  case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
    return "TENSOR_QUANT8_SYMM_PER_CHANNEL";
  case OperandType::TENSOR_QUANT16_ASYMM:
    return "TENSOR_QUANT16_ASYMM";
  case OperandType::TENSOR_QUANT8_SYMM:
    return "TENSOR_QUANT8_SYMM";
  case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
    return "TENSOR_QUANT8_ASYMM_SIGNED";
  case OperandType::SUBGRAPH:
    return "SUBGRAPH";
  default:
    return std::to_string(static_cast<int32_t>(val));
  }
}
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
}  // namespace aidl
namespace ndk {
namespace internal {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++17-extensions"
template <>
constexpr inline std::array<aidl::android::hardware::neuralnetworks::OperandType, 16> enum_values<aidl::android::hardware::neuralnetworks::OperandType> = {
  aidl::android::hardware::neuralnetworks::OperandType::FLOAT32,
  aidl::android::hardware::neuralnetworks::OperandType::INT32,
  aidl::android::hardware::neuralnetworks::OperandType::UINT32,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_FLOAT32,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_INT32,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_QUANT8_ASYMM,
  aidl::android::hardware::neuralnetworks::OperandType::BOOL,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_QUANT16_SYMM,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_FLOAT16,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_BOOL8,
  aidl::android::hardware::neuralnetworks::OperandType::FLOAT16,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_QUANT16_ASYMM,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_QUANT8_SYMM,
  aidl::android::hardware::neuralnetworks::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
  aidl::android::hardware::neuralnetworks::OperandType::SUBGRAPH,
};
#pragma clang diagnostic pop
}  // namespace internal
}  // namespace ndk
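
// A minimal usage sketch (illustrative, assuming this header and
// <android/binder_enums.h> are on the include path): the enum_values
// specialization above is the table that ndk::enum_range<> iterates over,
// and toString() (found via ADL) yields each enumerator's symbolic name,
// falling back to the raw int32_t for unrecognized values.
//
//   #include <cstdio>
//   using aidl::android::hardware::neuralnetworks::OperandType;
//
//   for (const OperandType type : ndk::enum_range<OperandType>()) {
//     std::printf("%s\n", toString(type).c_str());  // "FLOAT32", "INT32", ..., "SUBGRAPH"
//   }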