/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "Gather.h"

#include <cstring>

#include "OperationResolver.h"
#include "OperationsExecutionUtils.h"
#include "Tracing.h"

namespace android {
namespace nn {
namespace gather {
namespace {

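// Gathers slices of the input along |axis| as selected by |indicesData|. The
// input is viewed as a [outerSize, axisSize, innerSize] array: for every outer
// slice, each index selects one contiguous block of innerSize elements, which
// is copied verbatim into the output. Each index is range-checked against the
// axis size before the copy.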
template <typename T>
inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis,
                 const int32_t* indicesData, const Shape& indicesShape, T* outputData) {
    const auto outerSize = getNumberOfElements(inputShape, 0, axis);
    const auto axisSize = getSizeOfDimension(inputShape, axis);
    const auto innerSize =
            getNumberOfElements(inputShape, axis + 1, getNumberOfDimensions(inputShape));
    const auto indicesCount = getNumberOfElements(indicesShape);
    for (uint32_t outer = 0; outer < outerSize; ++outer) {
        for (uint32_t outputIndex = 0; outputIndex < indicesCount; ++outputIndex) {
            const auto inputIndex = static_cast<uint32_t>(indicesData[outputIndex]);
            NN_RET_CHECK_LE(0u, inputIndex);
            NN_RET_CHECK_LT(inputIndex, axisSize);
            std::memcpy(outputData + (outer * indicesCount + outputIndex) * innerSize,
                        inputData + (outer * axisSize + inputIndex) * innerSize,
                        sizeof(T) * innerSize);
        }
    }
    return true;
}

}  // namespace

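// Computes the output shape: a negative |axis| is normalized first, then the
// output dimensions are the input dimensions before |axis|, followed by all of
// the indices dimensions, followed by the input dimensions after |axis|.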
bool prepare(IOperationExecutionContext* context) {
    Shape input = context->getInputShape(kInputTensor);
    int32_t axis = context->getInputValue<int32_t>(kInputAxis);
    NN_RET_CHECK(handleNegativeAxis(input, &axis));
    Shape indices = context->getInputShape(kInputIndices);
    Shape output = context->getOutputShape(kOutputTensor);

    output.dimensions.clear();
    output.dimensions.reserve(getNumberOfDimensions(input) + getNumberOfDimensions(indices) - 1);
    output.dimensions.insert(output.dimensions.end(), input.dimensions.begin(),
                             input.dimensions.begin() + axis);
    output.dimensions.insert(output.dimensions.end(), indices.dimensions.begin(),
                             indices.dimensions.end());
    output.dimensions.insert(output.dimensions.end(), input.dimensions.begin() + axis + 1,
                             input.dimensions.end());

    return context->setOutputShape(kOutputTensor, output);
}

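// Dispatches to eval() with the element type matching the input tensor's
// OperandType; the indices are always read as int32_t.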
bool execute(IOperationExecutionContext* context) {
    int32_t axis = context->getInputValue<int32_t>(kInputAxis);
    NN_RET_CHECK(handleNegativeAxis(context->getInputShape(kInputTensor), &axis));
    switch (context->getInputType(kInputTensor)) {
        case OperandType::TENSOR_FLOAT16:
            return eval(context->getInputBuffer<_Float16>(kInputTensor),
                        context->getInputShape(kInputTensor), axis,
                        context->getInputBuffer<int32_t>(kInputIndices),
                        context->getInputShape(kInputIndices),
                        context->getOutputBuffer<_Float16>(kOutputTensor));
        case OperandType::TENSOR_FLOAT32:
            return eval(context->getInputBuffer<float>(kInputTensor),
                        context->getInputShape(kInputTensor), axis,
                        context->getInputBuffer<int32_t>(kInputIndices),
                        context->getInputShape(kInputIndices),
                        context->getOutputBuffer<float>(kOutputTensor));
        case OperandType::TENSOR_INT32:
            return eval(context->getInputBuffer<int32_t>(kInputTensor),
                        context->getInputShape(kInputTensor), axis,
                        context->getInputBuffer<int32_t>(kInputIndices),
                        context->getInputShape(kInputIndices),
                        context->getOutputBuffer<int32_t>(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM:
            return eval(context->getInputBuffer<uint8_t>(kInputTensor),
                        context->getInputShape(kInputTensor), axis,
                        context->getInputBuffer<int32_t>(kInputIndices),
                        context->getInputShape(kInputIndices),
                        context->getOutputBuffer<uint8_t>(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return eval(context->getInputBuffer<int8_t>(kInputTensor),
                        context->getInputShape(kInputTensor), axis,
                        context->getInputBuffer<int32_t>(kInputIndices),
                        context->getInputShape(kInputIndices),
                        context->getOutputBuffer<int8_t>(kOutputTensor));
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}

}  // namespace gather

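// Registers GATHER with the operation resolver, pairing the default validation
// routine with the prepare() and execute() callbacks defined above.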
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(GATHER, gather::prepare, gather::execute);

}  // namespace nn
}  // namespace android