/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_

#include <algorithm>
#include <limits>

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_ops {

// T is expected to be either float or int.
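//
// Illustrative usage (a sketch, not part of this header), with caller-provided
// names a, b, c, out and shape: summing three float tensors of identical shape.
//   const float* inputs[] = {a, b, c};
//   AddN(shape, /*num_inputs=*/3, inputs, out);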
template <typename T>
inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs,
                 const T* const* input_data, T* output_data) {
  // All inputs and the output should have the same shape; this is checked
  // during the Prepare stage.
  const size_t size = input_shape.FlatSize();
  for (size_t i = 0; i < size; ++i) {
    T x = 0;
    for (size_t j = 0; j < num_inputs; ++j) {
      x += input_data[j][i];
    }
    output_data[i] = x;
  }
}

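// Quantized (int8) variant. Rescaling sketch (illustrative, assuming the usual
// TFLite convention that input1_offset is the negated input zero point, as
// noted below, and output_offset is the output zero point):
//   real_input_j = input_scale  * (input_data[j][i] + params.input1_offset)
//   real_output  = output_scale * (output_data[i]   - params.output_offset)
// Each input term is left-shifted by params.left_shift and rescaled with
// (input1_multiplier, input1_shift) into a common fixed-point accumulator; the
// sum is then rescaled with (output_multiplier, output_shift), offset by
// params.output_offset, and clamped to the fused activation range.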
inline void AddN(const ArithmeticParams& params,
                 const RuntimeShape& input_shape, const size_t num_inputs,
                 const int8_t* const* input_data, int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // Input offset is the negated input zero point. Activation tensors are
  // asymmetrically quantized, so they span the full int8 range.
  // All inputs should have the same zero-point and scale; this is checked
  // during the Prepare stage.
  TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());

  // All inputs and the output should have the same shape; this is checked
  // during the Prepare stage.
  const size_t size = input_shape.FlatSize();
  for (size_t i = 0; i < size; ++i) {
    // Accumulate in scaled_x before clamping to avoid overflow.
    const int32_t x = params.input1_offset;  // x = 0
    const int32_t shifted_x = x * (1 << params.left_shift);
    int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp(
        shifted_x, params.input1_multiplier, params.input1_shift);

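    // Offset, shift, and rescale each input, then add it into the accumulator.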
    for (size_t j = 0; j < num_inputs; ++j) {
      const int32_t y = params.input1_offset + input_data[j][i];
      const int32_t shifted_y = y * (1 << params.left_shift);
      int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_y, params.input1_multiplier, params.input1_shift);
      scaled_x += scaled_y;
    }

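    // Rescale the accumulated sum to the output scale, re-apply the output
    // zero point, and clamp to the fused activation range.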
    const int32_t raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            scaled_x, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<int8_t>(clamped_output);
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_