/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Conversions.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/CommonUtils.h>

#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
#include <variant>

#include "Utils.h"

#ifdef __ANDROID__
#include <android/hardware_buffer.h>
#include <vndk/hardware_buffer.h>
#endif  // __ANDROID__

namespace {

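// Returns the integral value underlying a scoped enum; used below when printing
// unrecognized enum values in error messages.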
template <typename Type>
constexpr std::underlying_type_t<Type> underlyingType(Type value) {
    return static_cast<std::underlying_type_t<Type>>(value);
}

}  // namespace

namespace android::nn {
namespace {

using hardware::hidl_handle;
using hardware::hidl_memory;
using hardware::hidl_vec;

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

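// Element-wise conversion of a hidl_vec: converts each HAL element to its canonical
// counterpart and propagates the first failure via NN_TRY.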
template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const hidl_vec<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
    }
    return canonical;
}

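// Converts a HAL object to its canonical form and then checks that the result is valid
// for version 1.0 of the interface before returning it.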
template <typename Type>
GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
    NN_TRY(hal::V1_0::utils::compliantVersion(canonical));
    return canonical;
}

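// Duplicates the fds of a native_handle_t and copies its ints into the canonical
// Memory::Unknown::Handle representation.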
nn::GeneralResult<nn::Memory::Unknown::Handle> unknownHandleFromNativeHandle(
        const native_handle_t* handle) {
    if (handle == nullptr) {
        return NN_ERROR() << "unknownHandleFromNativeHandle failed because handle is nullptr";
    }

    std::vector<base::unique_fd> fds =
            NN_TRY(nn::dupFds(handle->data + 0, handle->data + handle->numFds));

    std::vector<int> ints(handle->data + handle->numFds,
                          handle->data + handle->numFds + handle->numInts);

    return nn::Memory::Unknown::Handle{.fds = std::move(fds), .ints = std::move(ints)};
}

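// Converts a hidl_memory object to the canonical SharedMemory representation based on
// its name: "ashmem" and "mmap_fd" map to their dedicated canonical types,
// "hardware_buffer_blob" is reconstructed as an AHardwareBuffer, and anything else is
// preserved as an unknown memory type.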
nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
    CHECK_LE(memory.size(), std::numeric_limits<size_t>::max());
    if (!memory.valid()) {
        return NN_ERROR() << "Unable to convert invalid hidl_memory";
    }

    if (memory.name() == "ashmem") {
        if (memory.handle()->numFds != 1) {
            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
                              << memory.handle()->numFds << " numFds, but expected 1";
        }
        if (memory.handle()->numInts != 0) {
            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
                              << memory.handle()->numInts << " numInts, but expected 0";
        }
        auto fd = NN_TRY(nn::dupFd(memory.handle()->data[0]));
        auto handle = nn::Memory::Ashmem{
                .fd = std::move(fd),
                .size = static_cast<size_t>(memory.size()),
        };
        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
    }

    if (memory.name() == "mmap_fd") {
        if (memory.handle()->numFds != 1) {
            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
                              << memory.handle()->numFds << " numFds, but expected 1";
        }
        if (memory.handle()->numInts != 3) {
            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
                              << memory.handle()->numInts << " numInts, but expected 3";
        }

        const int fd = memory.handle()->data[0];
        const int prot = memory.handle()->data[1];
        const int lower = memory.handle()->data[2];
        const int higher = memory.handle()->data[3];
        const size_t offset = nn::getOffsetFromInts(lower, higher);

        return nn::createSharedMemoryFromFd(static_cast<size_t>(memory.size()), prot, fd, offset);
    }

    if (memory.name() != "hardware_buffer_blob") {
        auto handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle()));
        auto unknown = nn::Memory::Unknown{
                .handle = std::move(handle),
                .size = static_cast<size_t>(memory.size()),
                .name = memory.name(),
        };
        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(unknown)});
    }

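    // At this point the memory is named "hardware_buffer_blob", so rebuild the original
    // AHardwareBuffer from its native handle. This is only possible on device builds.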
#ifdef __ANDROID__
    constexpr auto roundUpToMultiple = [](uint32_t value, uint32_t multiple) -> uint32_t {
        return (value + multiple - 1) / multiple * multiple;
    };

    const auto size = memory.size();
    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
    const uint32_t width = size;
    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.

    // AHardwareBuffer_createFromHandle() might fail because an allocator
    // expects a specific stride value. In that case, we try to guess it by
    // aligning the width to small powers of 2.
    // TODO(b/174120849): Avoid stride assumptions.
    AHardwareBuffer* hardwareBuffer = nullptr;
    status_t status = UNKNOWN_ERROR;
    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
        const uint32_t stride = roundUpToMultiple(width, alignment);
        AHardwareBuffer_Desc desc{
                .width = width,
                .height = height,
                .layers = layers,
                .format = format,
                .usage = usage,
                .stride = stride,
        };
        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                                  &hardwareBuffer);
        if (status == NO_ERROR) {
            break;
        }
    }
    if (status != NO_ERROR) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "Can't create AHardwareBuffer from handle. Error: " << status;
    }

    return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true);
#else   // __ANDROID__
    LOG(FATAL) << "nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const "
                  "hidl_memory& memory): Not Available on Host Build";
    return (NN_ERROR() << "createSharedMemoryFromHidlMemory failed")
            .operator nn::GeneralResult<nn::SharedMemory>();
#endif  // __ANDROID__
}

}  // anonymous namespace

GeneralResult<OperandType> unvalidatedConvert(const hal::V1_0::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

GeneralResult<OperationType> unvalidatedConvert(const hal::V1_0::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

GeneralResult<Operand::LifeTime> unvalidatedConvert(const hal::V1_0::OperandLifeTime& lifetime) {
    return static_cast<Operand::LifeTime>(lifetime);
}

GeneralResult<DeviceStatus> unvalidatedConvert(const hal::V1_0::DeviceStatus& deviceStatus) {
    return static_cast<DeviceStatus>(deviceStatus);
}

GeneralResult<Capabilities::PerformanceInfo> unvalidatedConvert(
        const hal::V1_0::PerformanceInfo& performanceInfo) {
    return Capabilities::PerformanceInfo{
            .execTime = performanceInfo.execTime,
            .powerUsage = performanceInfo.powerUsage,
    };
}

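// V1_0::Capabilities only reports two performance numbers (float32 and quantized8), so
// they are expanded into the canonical per-operand-type performance table via
// makeQuantized8PerformanceConsistentWithP.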
GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_0::Capabilities& capabilities) {
    const auto quantized8Performance =
            NN_TRY(unvalidatedConvert(capabilities.quantized8Performance));
    const auto float32Performance = NN_TRY(unvalidatedConvert(capabilities.float32Performance));

    auto table = hal::utils::makeQuantized8PerformanceConsistentWithP(float32Performance,
                                                                      quantized8Performance);

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = float32Performance,
            .relaxedFloat32toFloat16PerformanceTensor = float32Performance,
            .operandPerformance = std::move(table),
    };
}

GeneralResult<DataLocation> unvalidatedConvert(const hal::V1_0::DataLocation& location) {
    return DataLocation{
            .poolIndex = location.poolIndex,
            .offset = location.offset,
            .length = location.length,
    };
}

GeneralResult<Operand> unvalidatedConvert(const hal::V1_0::Operand& operand) {
    const auto type = NN_TRY(unvalidatedConvert(operand.type));
    const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
    const auto location = NN_TRY(unvalidatedConvert(operand.location));
    return Operand{
            .type = type,
            .dimensions = operand.dimensions,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = lifetime,
            .location = location,
    };
}

GeneralResult<Operation> unvalidatedConvert(const hal::V1_0::Operation& operation) {
    const auto type = NN_TRY(unvalidatedConvert(operation.type));
    return Operation{
            .type = type,
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

GeneralResult<Model::OperandValues> unvalidatedConvert(const hidl_vec<uint8_t>& operandValues) {
    return Model::OperandValues(operandValues.data(), operandValues.size());
}

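// Converts a hidl_handle holding exactly one fd and no ints into a canonical SharedHandle;
// a null native handle maps to a null SharedHandle.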
GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& handle) {
    if (handle.getNativeHandle() == nullptr) {
        return nullptr;
    }
    if (handle->numFds != 1 || handle->numInts != 0) {
        return NN_ERROR()
               << "unvalidatedConvert failed because handle does not only hold a single fd";
    }
    auto duplicatedFd = NN_TRY(nn::dupFd(handle->data[0]));
    return std::make_shared<const Handle>(std::move(duplicatedFd));
}

GeneralResult<SharedMemory> unvalidatedConvert(const hidl_memory& memory) {
    return createSharedMemoryFromHidlMemory(memory);
}

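// The canonical Model does not track numberOfConsumers, so verify that the counts declared
// in the HAL model match the counts derived from its operations before dropping the field.
//
// Typical use (sketch): a caller converts an incoming HAL model to canonical form before
// validating or preparing it, e.g.
//   auto canonical = nn::convert(halModel);
//   if (!canonical.has_value()) { /* report canonical.error().message */ }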
GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model) {
    auto operations = NN_TRY(unvalidatedConvert(model.operations));

    // Verify number of consumers.
    const auto numberOfConsumers =
            NN_TRY(countNumberOfConsumers(model.operands.size(), operations));
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
                   << "Invalid numberOfConsumers for operand " << i << ", expected "
                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
        }
    }

    auto operands = NN_TRY(unvalidatedConvert(model.operands));
    auto main = Model::Subgraph{
            .operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
    };

    auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
    auto pools = NN_TRY(unvalidatedConvert(model.pools));
    return Model{
            .main = std::move(main),
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
    };
}

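// V1_0 request arguments carry no explicit lifetime; it is derived from the hasNoValue
// flag as either NO_VALUE or POOL-backed.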
GeneralResult<Request::Argument> unvalidatedConvert(const hal::V1_0::RequestArgument& argument) {
    const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
                                              : Request::Argument::LifeTime::POOL;
    const auto location = NN_TRY(unvalidatedConvert(argument.location));
    return Request::Argument{
            .lifetime = lifetime,
            .location = location,
            .dimensions = argument.dimensions,
    };
}

GeneralResult<Request> unvalidatedConvert(const hal::V1_0::Request& request) {
    auto memories = NN_TRY(unvalidatedConvert(request.pools));
    std::vector<Request::MemoryPool> pools;
    pools.reserve(memories.size());
    std::move(memories.begin(), memories.end(), std::back_inserter(pools));

    auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
    auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
    return Request{
            .inputs = std::move(inputs),
            .outputs = std::move(outputs),
            .pools = std::move(pools),
    };
}

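// Only the error codes listed below exist in V1_0; any other value is reported as a
// conversion failure rather than being silently remapped.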
GeneralResult<ErrorStatus> unvalidatedConvert(const hal::V1_0::ErrorStatus& status) {
    switch (status) {
        case hal::V1_0::ErrorStatus::NONE:
        case hal::V1_0::ErrorStatus::DEVICE_UNAVAILABLE:
        case hal::V1_0::ErrorStatus::GENERAL_FAILURE:
        case hal::V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
        case hal::V1_0::ErrorStatus::INVALID_ARGUMENT:
            return static_cast<ErrorStatus>(status);
    }
    return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
           << "Invalid ErrorStatus " << underlyingType(status);
}

GeneralResult<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus) {
    return validatedConvert(deviceStatus);
}

GeneralResult<Capabilities> convert(const hal::V1_0::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

GeneralResult<Model> convert(const hal::V1_0::Model& model) {
    return validatedConvert(model);
}

GeneralResult<Request> convert(const hal::V1_0::Request& request) {
    return validatedConvert(request);
}

GeneralResult<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status) {
    return validatedConvert(status);
}

}  // namespace android::nn

namespace android::hardware::neuralnetworks::V1_0::utils {
namespace {

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(utils::unvalidatedConvert(arguments[i]));
    }
    return halObject;
}

template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return utils::unvalidatedConvert(canonical);
}

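// Builds a hidl_handle that owns the given fds and ints. Ownership of the fds is released
// into the native_handle_t, which the returned hidl_handle then owns.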
nn::GeneralResult<hidl_handle> createNativeHandleFrom(std::vector<base::unique_fd> fds,
                                                      const std::vector<int32_t>& ints) {
    constexpr size_t kIntMax = std::numeric_limits<int>::max();
    CHECK_LE(fds.size(), kIntMax);
    CHECK_LE(ints.size(), kIntMax);
    native_handle_t* nativeHandle =
            native_handle_create(static_cast<int>(fds.size()), static_cast<int>(ints.size()));
    if (nativeHandle == nullptr) {
        return NN_ERROR() << "Failed to create native_handle";
    }

    for (size_t i = 0; i < fds.size(); ++i) {
        nativeHandle->data[i] = fds[i].release();
    }
    std::copy(ints.begin(), ints.end(), nativeHandle->data + nativeHandle->numFds);

    hidl_handle handle;
    handle.setTo(nativeHandle, /*shouldOwn=*/true);
    return handle;
}

nn::GeneralResult<hidl_handle> createNativeHandleFrom(base::unique_fd fd,
                                                      const std::vector<int32_t>& ints) {
    std::vector<base::unique_fd> fds;
    fds.push_back(std::move(fd));
    return createNativeHandleFrom(std::move(fds), ints);
}

nn::GeneralResult<hidl_handle> createNativeHandleFrom(const nn::Memory::Unknown::Handle& handle) {
    std::vector<base::unique_fd> fds = NN_TRY(nn::dupFds(handle.fds.begin(), handle.fds.end()));
    return createNativeHandleFrom(std::move(fds), handle.ints);
}

nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Ashmem& memory) {
    auto fd = NN_TRY(nn::dupFd(memory.fd));
    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {}));
    return hidl_memory("ashmem", std::move(handle), memory.size);
}

nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Fd& memory) {
    auto fd = NN_TRY(nn::dupFd(memory.fd));

    const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset);
    const std::vector<int> ints = {memory.prot, lowOffsetBits, highOffsetBits};

    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints));
    return hidl_memory("mmap_fd", std::move(handle), memory.size);
}

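// BLOB-format AHardwareBuffers are exported as "hardware_buffer_blob" with their width as
// the memory size; other formats are exported as "hardware_buffer" with a size of 0.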
nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) {
#ifdef __ANDROID__
    const auto* ahwb = memory.handle.get();
    AHardwareBuffer_Desc bufferDesc;
    AHardwareBuffer_describe(ahwb, &bufferDesc);

    const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB;
    const size_t size = isBlob ? bufferDesc.width : 0;
    const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer";

    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
    const hidl_handle hidlHandle(nativeHandle);
    hidl_handle copiedHandle(hidlHandle);

    return hidl_memory(name, std::move(copiedHandle), size);
#else   // __ANDROID__
    LOG(FATAL) << "nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const "
                  "nn::Memory::HardwareBuffer& memory): Not Available on Host Build";
    (void)memory;
    return (NN_ERROR() << "createHidlMemoryFrom failed").operator nn::GeneralResult<hidl_memory>();
#endif  // __ANDROID__
}

nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Unknown& memory) {
    return hidl_memory(memory.name, NN_TRY(createNativeHandleFrom(memory.handle)), memory.size);
}

}  // anonymous namespace

nn::GeneralResult<OperandType> unvalidatedConvert(const nn::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

nn::GeneralResult<OperationType> unvalidatedConvert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

nn::GeneralResult<OperandLifeTime> unvalidatedConvert(const nn::Operand::LifeTime& lifetime) {
    if (lifetime == nn::Operand::LifeTime::POINTER) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be unvalidatedConverted because it contains pointer-based memory";
    }
    return static_cast<OperandLifeTime>(lifetime);
}

nn::GeneralResult<DeviceStatus> unvalidatedConvert(const nn::DeviceStatus& deviceStatus) {
    return static_cast<DeviceStatus>(deviceStatus);
}

nn::GeneralResult<PerformanceInfo> unvalidatedConvert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return PerformanceInfo{
            .execTime = performanceInfo.execTime,
            .powerUsage = performanceInfo.powerUsage,
    };
}

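// The canonical per-operand-type performance table is collapsed back to the two numbers
// that V1_0::Capabilities can express: TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM performance.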
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
    const auto float32Performance = NN_TRY(unvalidatedConvert(
            capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32)));
    const auto quantized8Performance = NN_TRY(unvalidatedConvert(
            capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM)));
    return Capabilities{
            .float32Performance = float32Performance,
            .quantized8Performance = quantized8Performance,
    };
}

nn::GeneralResult<DataLocation> unvalidatedConvert(const nn::DataLocation& location) {
    return DataLocation{
            .poolIndex = location.poolIndex,
            .offset = location.offset,
            .length = location.length,
    };
}

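// numberOfConsumers is not part of the canonical Operand, so it is set to 0 here and
// recomputed by the Model conversion below.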
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
    const auto type = NN_TRY(unvalidatedConvert(operand.type));
    const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
    const auto location = NN_TRY(unvalidatedConvert(operand.location));
    return Operand{
            .type = type,
            .dimensions = operand.dimensions,
            .numberOfConsumers = 0,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = lifetime,
            .location = location,
    };
}

nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
    const auto type = NN_TRY(unvalidatedConvert(operation.type));
    return Operation{
            .type = type,
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
        const nn::Model::OperandValues& operandValues) {
    return hidl_vec<uint8_t>(operandValues.data(), operandValues.data() + operandValues.size());
}

nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
    if (handle == nullptr) {
        return {};
    }
    base::unique_fd fd = NN_TRY(nn::dupFd(handle->get()));
    return createNativeHandleFrom(std::move(fd), {});
}

nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
    if (memory == nullptr) {
        return NN_ERROR() << "Memory must be non-empty";
    }
    return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle);
}

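// Flattens the canonical model's main subgraph into the single-graph V1_0::Model layout,
// recomputing per-operand consumer counts. Models that still reference pointer-based data
// are rejected up front.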
nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be unvalidatedConverted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(unvalidatedConvert(model.main.operands));

    // Update number of consumers.
    const auto numberOfConsumers =
            NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations));
    CHECK(operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < operands.size(); ++i) {
        operands[i].numberOfConsumers = numberOfConsumers[i];
    }

    auto operations = NN_TRY(unvalidatedConvert(model.main.operations));
    auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
    auto pools = NN_TRY(unvalidatedConvert(model.pools));
    return Model{
            .operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
    };
}

nn::GeneralResult<RequestArgument> unvalidatedConvert(
        const nn::Request::Argument& requestArgument) {
    if (requestArgument.lifetime == nn::Request::Argument::LifeTime::POINTER) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Request cannot be unvalidatedConverted because it contains pointer-based memory";
    }
    const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE;
    const auto location = NN_TRY(unvalidatedConvert(requestArgument.location));
    return RequestArgument{
            .hasNoValue = hasNoValue,
            .location = location,
            .dimensions = requestArgument.dimensions,
    };
}

nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool) {
    return unvalidatedConvert(std::get<nn::SharedMemory>(memoryPool));
}

nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request) {
    if (!hal::utils::hasNoPointerData(request)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Request cannot be unvalidatedConverted because it contains pointer-based memory";
    }

    auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
    auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
    auto pools = NN_TRY(unvalidatedConvert(request.pools));
    return Request{
            .inputs = std::move(inputs),
            .outputs = std::move(outputs),
            .pools = std::move(pools),
    };
}

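// Canonical error statuses introduced after V1_0 have no HAL equivalent and are mapped to
// GENERAL_FAILURE.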
nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& status) {
    switch (status) {
        case nn::ErrorStatus::NONE:
        case nn::ErrorStatus::DEVICE_UNAVAILABLE:
        case nn::ErrorStatus::GENERAL_FAILURE:
        case nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
        case nn::ErrorStatus::INVALID_ARGUMENT:
            return static_cast<ErrorStatus>(status);
        default:
            return ErrorStatus::GENERAL_FAILURE;
    }
}

nn::GeneralResult<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
    return validatedConvert(deviceStatus);
}

nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

nn::GeneralResult<Model> convert(const nn::Model& model) {
    return validatedConvert(model);
}

nn::GeneralResult<Request> convert(const nn::Request& request) {
    return validatedConvert(request);
}

nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& status) {
    return validatedConvert(status);
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils