/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stddef.h>
#include <stdint.h>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"

// TODO(b/117523611): We should factor out a binary_op and put binary ops there.
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_mod {
namespace {

// Input/output tensor indices.
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;

// Op data for the floor_mod op.
struct OpData {
  bool requires_broadcast;
};

// TODO(b/117912880): Support quantization.

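// Allocates per-node state. `requires_broadcast` is only a default here; it is
// computed in Prepare once the input shapes are known.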
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* data = new OpData;
  data->requires_broadcast = false;
  return data;
}

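// Releases the OpData allocated in Init.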
void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

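// Checks input/output counts and types, records whether broadcasting is
// needed, and resizes the output to the common (or broadcast) shape.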
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Reinterpret the opaque data provided by the user.
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

  const TfLiteType type = input1->type;
  if (type != kTfLiteInt32 && type != kTfLiteFloat32 && type != kTfLiteInt64) {
    TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
                       TfLiteTypeGetName(type));
    return kTfLiteError;
  }
  output->type = type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  return context->ResizeTensor(context, output, output_size);
}

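// Computes floor_mod element-wise. FloorMod follows Python-style modulo
// semantics, i.e. the result takes the sign of the divisor: for example
// FloorMod(-7, 3) == 2 and FloorMod(7, -3) == -2, whereas C++'s built-in %
// would give -1 and 1. For integer types, a zero divisor is rejected before
// dispatching to the reference implementation.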
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
                      const TfLiteTensor* input1, const TfLiteTensor* input2,
                      TfLiteTensor* output) {
  const T* denominator_data = GetTensorData<T>(input2);

  if (input2->type == kTfLiteInt32 || input2->type == kTfLiteInt64) {
    // Validate the denominator only for integer types.
    const int num_elements = NumElements(input2);
    for (int i = 0; i < num_elements; ++i) {
      if (denominator_data[i] == 0) {
        TF_LITE_KERNEL_LOG(context, "Division by 0");
        return kTfLiteError;
      }
    }
  }
  if (requires_broadcast) {
    reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
        GetTensorShape(input1), GetTensorData<T>(input1),
        GetTensorShape(input2), denominator_data, GetTensorShape(output),
        GetTensorData<T>(output), reference_ops::FloorMod<T>);
  } else {
    reference_ops::BinaryFunction<T, T, T>(
        GetTensorShape(input1), GetTensorData<T>(input1),
        GetTensorShape(input2), GetTensorData<T>(input2),
        GetTensorShape(output), GetTensorData<T>(output),
        reference_ops::FloorMod<T>);
  }

  return kTfLiteOk;
}

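// Dispatches to the typed EvalImpl based on the input type validated in
// Prepare.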
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  switch (input1->type) {
    case kTfLiteInt32: {
      return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
                               input2, output);
    }
    case kTfLiteInt64: {
      return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
                               input2, output);
    }
    case kTfLiteFloat32: {
      return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
                             output);
    }
    default: {
      TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
                         TfLiteTypeGetName(input1->type));
      return kTfLiteError;
    }
  }
}

}  // namespace
}  // namespace floor_mod

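// Register_FLOOR_MOD exposes the kernel to an op resolver. A minimal sketch of
// manual registration, assuming the standard MutableOpResolver API:
//
//   tflite::MutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_FLOOR_MOD,
//                       tflite::ops::builtin::Register_FLOOR_MOD());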
TfLiteRegistration* Register_FLOOR_MOD() {
  // Init, Free, Prepare, and Eval satisfy the interface required by
  // TfLiteRegistration.
  static TfLiteRegistration r = {floor_mod::Init, floor_mod::Free,
                                 floor_mod::Prepare, floor_mod::Eval};
  return &r;
}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite