1 /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
#include "tensorflow/lite/tools/versioning/op_signature.h"

#include <cstdlib>
#include <cstring>

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/stderr_reporter.h"
24
25 namespace tflite {
26 namespace {
27
28 // A BuiltinDataAllocator which just uses malloc()/free().
29 class MallocDataAllocator : public BuiltinDataAllocator {
30 public:
Allocate(size_t size,size_t alignment_hint)31 void* Allocate(size_t size, size_t alignment_hint) override {
32 return malloc(size);
33 }
Deallocate(void * data)34 void Deallocate(void* data) override { free(data); }
35 };
36
37 // Get the number of dimensions of a tensor with idx of an operator op.
GetNumDims(const SubGraph * subgraph,const Operator * op,int idx)38 inline int GetNumDims(const SubGraph* subgraph, const Operator* op, int idx) {
39 return subgraph->tensors()->Get(op->inputs()->Get(idx))->shape()->size();
40 }
41
GetOpSignatureTensorSpecs(const flatbuffers::Vector<int32_t> * tensors,const SubGraph * subgraph,const Model * model)42 std::vector<OpSignatureTensorSpec> GetOpSignatureTensorSpecs(
43 const flatbuffers::Vector<int32_t>* tensors, const SubGraph* subgraph,
44 const Model* model) {
45 std::vector<OpSignatureTensorSpec> tensor_specs;
46 StderrReporter error_reporter;
47
48 for (int32_t i = 0; i < tensors->Length(); ++i) {
49 int32_t tensor_no = tensors->Get(i);
50
51 OpSignatureTensorSpec tensor_spec = {kTfLiteNoType};
52 if (tensor_no >= 0) {
53 if (subgraph->tensors() && tensor_no < subgraph->tensors()->Length()) {
54 auto* fb_tensor = subgraph->tensors()->Get(tensor_no);
55 ConvertTensorType(fb_tensor->type(), &tensor_spec.type,
56 &error_reporter);
57 auto buffer_idx = fb_tensor->buffer();
58 // Check if the tensor is a constant tensor.
59 if (buffer_idx != 0 && buffer_idx < model->buffers()->Length()) {
60 auto* buffer = model->buffers()->Get(buffer_idx);
61 if (buffer->data() && buffer->data()->size() != 0) {
62 tensor_spec.is_const = true;
63 }
64 }
65 const flatbuffers::Vector<int32_t>* shape_vec =
66 subgraph->tensors()->Get(tensor_no)->shape();
67 if (shape_vec) {
68 for (int32_t j = 0; j < shape_vec->Length(); ++j) {
69 tensor_spec.dims.push_back(shape_vec->Get(j));
70 }
71 }
72 const flatbuffers::Vector<int32_t>* shape_signature_vec =
73 subgraph->tensors()->Get(tensor_no)->shape_signature();
74 tensor_spec.is_shape_dynamic = false;
75 if (shape_signature_vec) {
76 for (int32_t j = 0; j < shape_signature_vec->Length(); ++j) {
77 if (shape_signature_vec->Get(j) == -1) {
78 tensor_spec.is_shape_dynamic = true;
79 break;
80 }
81 }
82 }
83 }
84 }
85 tensor_specs.push_back(tensor_spec);
86 }
87 return tensor_specs;
88 }
89
GetOpSignatureTensorSpecs(TfLiteIntArray * tensors,const TfLiteContext * context,const TfLiteNode * tflite_node)90 std::vector<OpSignatureTensorSpec> GetOpSignatureTensorSpecs(
91 TfLiteIntArray* tensors, const TfLiteContext* context,
92 const TfLiteNode* tflite_node) {
93 std::vector<OpSignatureTensorSpec> tensor_specs;
94
95 for (int32_t i = 0; i < tensors->size; ++i) {
96 int32_t tensor_no = tensors->data[i];
97
98 OpSignatureTensorSpec tensor_spec = {kTfLiteNoType};
99 if (tensor_no >= 0) {
100 const TfLiteTensor* tfl_tensor;
101 if (context->tensors != nullptr) {
102 tfl_tensor = &context->tensors[tensor_no];
103 } else {
104 tfl_tensor = context->GetTensor(context, tensor_no);
105 }
106 if (tfl_tensor != nullptr) {
107 tensor_spec.type = tfl_tensor->type;
108 tensor_spec.is_const = (tfl_tensor->allocation_type == kTfLiteMmapRo);
109 if (tfl_tensor->dims) {
110 for (int32_t j = 0; j < tfl_tensor->dims->size; ++j) {
111 tensor_spec.dims.push_back(tfl_tensor->dims->data[j]);
112 }
113 }
114 tensor_spec.is_shape_dynamic = HasUnspecifiedDimension(tfl_tensor);
115 }
116 }
117 tensor_specs.push_back(tensor_spec);
118 }
119 return tensor_specs;
120 }
121
122 } // namespace
123
// Builds an OpSignature for an operator stored in a flatbuffer model: parses
// the builtin options, fills per-op extension options for the operators the
// versioning logic inspects, and records input/output tensor specs and the
// declared op version.
OpSignature GetOpSignature(const OperatorCode* op_code, const Operator* op,
                           const SubGraph* subgraph, const Model* model) {
  auto builtin_code = GetBuiltinCode(op_code);
  OpSignature op_sig = {builtin_code};
  // ext_options is a union-like aggregate; zero it before any case writes it.
  std::memset(&op_sig.ext_options, 0, sizeof(op_sig.ext_options));

  if (builtin_code != BuiltinOperator_CUSTOM) {
    StderrReporter error_reporter;
    MallocDataAllocator allocator;
    // Parse the flatbuffer builtin options into op_sig.builtin_data.
    ParseOpData(op, builtin_code, &error_reporter, &allocator,
                &op_sig.builtin_data);
  } else {
    // NOTE(review): assumes custom_code() is non-null for CUSTOM ops — TODO
    // confirm the schema guarantees this for all producers.
    op_sig.custom_name = op_code->custom_code()->str();
  }

  // Per-op extension options used by version selection.
  switch (builtin_code) {
    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      // Per-channel quantization: one scale per output channel.
      // NOTE(review): assumes the filter (input 1) has a 4-D shape with the
      // channel count in dim 3 — TODO confirm for malformed models.
      const Tensor* filter_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(1));
      const QuantizationParameters* filter_quant =
          filter_tensor->quantization();
      int num_channels = filter_tensor->shape()->Get(3);
      if (filter_quant && filter_quant->scale() &&
          filter_quant->scale()->Length() &&
          filter_quant->scale()->Length() == num_channels) {
        op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized = true;
      }
    } break;

    case BuiltinOperator_FULLY_CONNECTED: {
      // Record whether the weight tensor (input 1) carries sparsity metadata.
      const Tensor* weight_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(1));
      op_sig.ext_options.fully_connected.sparse_weight =
          (weight_tensor->sparsity() != nullptr);
    } break;

    case BuiltinOperator_MUL: {
      if (op->inputs()->Length() < 2 || op->outputs()->Length() < 1) {
        break;
      }
      // Capture the first quantization scale of both inputs and the output;
      // scales are only recorded when all three tensors are quantized.
      const Tensor* input1_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(0));
      const Tensor* input2_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(1));
      const Tensor* output_tensor =
          subgraph->tensors()->Get(op->outputs()->Get(0));
      const QuantizationParameters* input1_quant =
          input1_tensor->quantization();
      const QuantizationParameters* input2_qunt = input2_tensor->quantization();
      const QuantizationParameters* output_quant =
          output_tensor->quantization();
      if (input1_quant && input1_quant->scale() &&
          input1_quant->scale()->Length() && input2_qunt &&
          input2_qunt->scale() && input2_qunt->scale()->Length() &&
          output_quant && output_quant->scale() &&
          output_quant->scale()->Length()) {
        op_sig.ext_options.mul.input1_scale = input1_quant->scale()->Get(0);
        op_sig.ext_options.mul.input2_scale = input2_qunt->scale()->Get(0);
        op_sig.ext_options.mul.output_scale = output_quant->scale()->Get(0);
      }
    } break;

    case BuiltinOperator_CONV_2D: {
      const Tensor* input_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(0));
      const Tensor* filter_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(1));
      const QuantizationParameters* filter_quant =
          filter_tensor->quantization();
      // Per-channel quantization: one scale per filter (output channel,
      // filter shape dim 0).
      int num_filters = filter_tensor->shape()->Get(0);
      if (filter_quant && filter_quant->scale() &&
          filter_quant->scale()->Length() &&
          filter_quant->scale()->Length() == num_filters) {
        op_sig.ext_options.conv_2d.is_per_channel_quantized = true;
      }
      // Grouped convolution: input channel count differs from the filter's
      // input-channel dimension. NOTE(review): assumes 4-D shapes with
      // channels in dim 3 once the input shape is non-empty — TODO confirm.
      if (input_tensor->shape()->size()) {
        int num_input_channels = input_tensor->shape()->Get(3);
        int num_filter_input_channels = filter_tensor->shape()->Get(3);
        op_sig.ext_options.conv_2d.is_grouped_convolution =
            num_input_channels != num_filter_input_channels;
      } else {
        op_sig.ext_options.conv_2d.is_grouped_convolution = false;
      }
    } break;

    case BuiltinOperator_STRIDED_SLICE: {
      // Versioning depends on the rank of the sliced tensor (input 0).
      op_sig.ext_options.strided_slice.num_dims = GetNumDims(subgraph, op, 0);
    } break;

    case BuiltinOperator_ABS: {
      // Presence of quantization parameters on input 0 marks quantized ABS.
      if (subgraph->tensors()->Get(op->inputs()->Get(0))->quantization()) {
        op_sig.ext_options.abs.input_quantized = true;
      }
    } break;

    case BuiltinOperator_DEQUANTIZE: {
      // Per-channel dequantize: more than one scale, and the scale count
      // matches the size of the quantized dimension of the input shape.
      const Tensor* input_tensor =
          subgraph->tensors()->Get(op->inputs()->Get(0));
      const QuantizationParameters* input_quant = input_tensor->quantization();
      if (input_quant && input_quant->scale() &&
          input_quant->scale()->Length() > 1 &&
          input_quant->scale()->Length() ==
              input_tensor->shape()->Get(input_quant->quantized_dimension())) {
        op_sig.ext_options.dequantize.is_per_channel_quantized = true;
      }
    } break;

    case BuiltinOperator_QUANTIZE: {
      // Per-channel quantize: same criterion as DEQUANTIZE, applied to the
      // output tensor instead.
      const Tensor* output_tensor =
          subgraph->tensors()->Get(op->outputs()->Get(0));
      const QuantizationParameters* output_quant =
          output_tensor->quantization();
      if (output_quant && output_quant->scale() &&
          output_quant->scale()->Length() > 1 &&
          output_quant->scale()->Length() ==
              output_tensor->shape()->Get(
                  output_quant->quantized_dimension())) {
        op_sig.ext_options.quantize.is_per_channel_quantized = true;
      }
    } break;

    default:
      break;
  }

  op_sig.inputs = GetOpSignatureTensorSpecs(op->inputs(), subgraph, model);
  op_sig.outputs = GetOpSignatureTensorSpecs(op->outputs(), subgraph, model);
  op_sig.version = op_code->version();
  return op_sig;
}
254
GetOpSignature(const TfLiteContext * context,const TfLiteNode * node,const TfLiteRegistration * registration)255 OpSignature GetOpSignature(const TfLiteContext* context, const TfLiteNode* node,
256 const TfLiteRegistration* registration) {
257 OpSignature op_sig = {
258 static_cast<BuiltinOperator>(registration->builtin_code)};
259 op_sig.builtin_data = node->builtin_data;
260 if (op_sig.op == BuiltinOperator_CUSTOM) {
261 op_sig.custom_name = registration->custom_name;
262 op_sig.custom_initial_data = node->custom_initial_data;
263 }
264 std::memset(&op_sig.ext_options, 0, sizeof(op_sig.ext_options));
265
266 op_sig.inputs = GetOpSignatureTensorSpecs(node->inputs, context, node);
267 op_sig.outputs = GetOpSignatureTensorSpecs(node->outputs, context, node);
268 op_sig.version = registration->version;
269 return op_sig;
270 }
271
272 } // namespace tflite
273