1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #include "tensorflow/lite/c/c_api.h"
17 #include "tensorflow/lite/c/c_api_experimental.h"
18 #include "tensorflow/lite/c/common.h"
19 #include "tensorflow/lite/c/builtin_op_data.h"
20
// This file exists just to verify that the header files above can build,
// link, and run as "C" code.
23
24 #ifdef __cplusplus
25 #error "This file should be compiled as C code, not as C++."
26 #endif
27
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31
// Reports a failed CHECK to stderr (with source location) and terminates
// the process. Called only from the CHECK macro below; never returns.
static void CheckFailed(const char *expr, const char *file, int line) {
  fprintf(stderr, "ERROR: CHECK failed: %s:%d: %s\n", file, line, expr);
  fflush(stderr);  // Make sure the message is visible before aborting.
  abort();
}
39
40 // We use an extra level of macro indirection here to ensure that the
41 // macro arguments get evaluated, so that in a call to CHECK(foo),
42 // the call to STRINGIZE(condition) in the definition of the CHECK
43 // macro results in the string "foo" rather than the string "condition".
#define STRINGIZE(expression) STRINGIZE2(expression)
#define STRINGIZE2(expression) #expression

// Like assert(), but not dependent on NDEBUG.
// Expands to an expression (not a statement), so it stays usable anywhere an
// expression is allowed; on failure it reports the stringized condition with
// file/line via CheckFailed() and aborts.
#define CHECK(condition) \
  ((condition) ? (void)0 \
               : CheckFailed(STRINGIZE(condition), __FILE__, __LINE__))
#define ASSERT_EQ(expected, actual) CHECK((expected) == (actual))
#define ASSERT_NE(expected, actual) CHECK((expected) != (actual))
// String comparison by content (strcmp), not by pointer identity.
#define ASSERT_STREQ(expected, actual) \
  ASSERT_EQ(0, strcmp((expected), (actual)))
55
56 // Test the TfLiteVersion function.
// Exercises TfLiteVersion(): prints the reported version string and checks
// that it is non-empty.
static void TestVersion(void) {
  const char *version_string = TfLiteVersion();
  printf("Version = %s\n", version_string);
  CHECK(*version_string != '\0');
}
62
TestInferenceUsingSignature(void)63 static void TestInferenceUsingSignature(void) {
64 TfLiteModel* model = TfLiteModelCreateFromFile(
65 "tensorflow/lite/testdata/multi_signatures.bin");
66 ASSERT_NE(model, NULL);
67
68 TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
69 ASSERT_NE(options, NULL);
70 TfLiteInterpreterOptionsSetNumThreads(options, 2);
71
72 TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
73 ASSERT_NE(interpreter, NULL);
74
75 // The options can be deleted immediately after interpreter creation.
76 TfLiteInterpreterOptionsDelete(options);
77
78 // (optional) Validate signatures
79 ASSERT_EQ(TfLiteInterpreterGetSignatureCount(interpreter), 2);
80 ASSERT_STREQ(TfLiteInterpreterGetSignatureKey(interpreter, 0), "add");
81 ASSERT_STREQ(TfLiteInterpreterGetSignatureKey(interpreter, 1), "sub");
82
83 // Validate signature "add"
84 TfLiteSignatureRunner* add_runner =
85 TfLiteInterpreterGetSignatureRunner(interpreter, "add");
86 ASSERT_NE(add_runner, NULL);
87 ASSERT_EQ(TfLiteSignatureRunnerGetInputCount(add_runner), 1);
88 ASSERT_STREQ(TfLiteSignatureRunnerGetInputName(add_runner, 0), "x");
89 ASSERT_EQ(TfLiteSignatureRunnerGetOutputCount(add_runner), 1);
90 ASSERT_STREQ(TfLiteSignatureRunnerGetOutputName(add_runner, 0), "output_0");
91
92 // Resize signature "add" input tensor "x"
93 int input_dims[1] = {2};
94 ASSERT_EQ(
95 TfLiteSignatureRunnerResizeInputTensor(add_runner, "x", input_dims, 1),
96 kTfLiteOk);
97
98 // Allocate tensors for signature "add"
99 ASSERT_EQ(TfLiteSignatureRunnerAllocateTensors(add_runner), kTfLiteOk);
100
101 // Validate signature "add" input tensor "x"
102 TfLiteTensor* input_tensor =
103 TfLiteSignatureRunnerGetInputTensor(add_runner, "x");
104 ASSERT_NE(input_tensor, NULL);
105 ASSERT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
106 ASSERT_EQ(TfLiteTensorNumDims(input_tensor), 1);
107 ASSERT_EQ(TfLiteTensorDim(input_tensor, 0), 2);
108 ASSERT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2);
109 ASSERT_NE(TfLiteTensorData(input_tensor), NULL);
110
111 TfLiteQuantizationParams input_params =
112 TfLiteTensorQuantizationParams(input_tensor);
113 ASSERT_EQ(input_params.scale, 0.f);
114 ASSERT_EQ(input_params.zero_point, 0);
115
116 float input[2] = {2.f, 4.f};
117 ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input, 2 * sizeof(float)),
118 kTfLiteOk);
119 ASSERT_EQ(TfLiteSignatureRunnerInvoke(add_runner), kTfLiteOk);
120
121 const TfLiteTensor* output_tensor =
122 TfLiteSignatureRunnerGetOutputTensor(add_runner, "output_0");
123 ASSERT_NE(output_tensor, NULL);
124 ASSERT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
125 ASSERT_EQ(TfLiteTensorNumDims(output_tensor), 1);
126 ASSERT_EQ(TfLiteTensorDim(output_tensor, 0), 2);
127 ASSERT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2);
128 ASSERT_NE(TfLiteTensorData(output_tensor), NULL);
129
130 TfLiteQuantizationParams output_params =
131 TfLiteTensorQuantizationParams(output_tensor);
132 ASSERT_EQ(output_params.scale, 0.f);
133 ASSERT_EQ(output_params.zero_point, 0);
134
135 float output[2];
136 ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output, 2 * sizeof(float)),
137 kTfLiteOk);
138 // Verify the result
139 ASSERT_EQ(output[0], input[0] + 2.f);
140 ASSERT_EQ(output[1], input[1] + 2.f);
141
142 // The signature runner should be deleted before interpreter deletion.
143 TfLiteSignatureRunnerDelete(add_runner);
144 TfLiteInterpreterDelete(interpreter);
145 // The model should only be deleted after destroying the interpreter.
146 TfLiteModelDelete(model);
147 }
148
// This test checks if resizing the input (decreasing or increasing its size)
// would invalidate input/output tensors.
TestRepeatResizeInputTensor(void)151 static void TestRepeatResizeInputTensor(void) {
152 TfLiteModel* model = TfLiteModelCreateFromFile(
153 "tensorflow/lite/testdata/multi_signatures.bin");
154 ASSERT_NE(model, NULL);
155
156 TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
157 ASSERT_NE(options, NULL);
158 TfLiteInterpreterOptionsSetNumThreads(options, 2);
159
160 TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
161 ASSERT_NE(interpreter, NULL);
162
163 TfLiteInterpreterOptionsDelete(options);
164
165 ASSERT_EQ(TfLiteInterpreterGetSignatureCount(interpreter), 2);
166 ASSERT_STREQ(TfLiteInterpreterGetSignatureKey(interpreter, 0), "add");
167 ASSERT_STREQ(TfLiteInterpreterGetSignatureKey(interpreter, 1), "sub");
168
169 TfLiteSignatureRunner* add_runner =
170 TfLiteInterpreterGetSignatureRunner(interpreter, "add");
171 ASSERT_NE(add_runner, NULL);
172 ASSERT_EQ(TfLiteSignatureRunnerGetInputCount(add_runner), 1);
173 ASSERT_STREQ(TfLiteSignatureRunnerGetInputName(add_runner, 0), "x");
174 ASSERT_EQ(TfLiteSignatureRunnerGetOutputCount(add_runner), 1);
175 ASSERT_STREQ(TfLiteSignatureRunnerGetOutputName(add_runner, 0), "output_0");
176
177 TfLiteTensor* input_tensor =
178 TfLiteSignatureRunnerGetInputTensor(add_runner, "x");
179 const TfLiteTensor* output_tensor =
180 TfLiteSignatureRunnerGetOutputTensor(add_runner, "output_0");
181
182 // For different input sizes, resize the input/output tensors and check if
183 // inferences runs as expected.
184
185 int sizes[] = {3, 1, 5};
186 float inputs_1[] = {3.f, 6.f, 11.f};
187 float inputs_2[] = {4.f};
188 float inputs_3[] = {5.f, 8.f, 11.f, 12.f, 20.f};
189 float* all_inputs[] = {inputs_1, inputs_2, inputs_3};
190 float actual_outputs1[] = {0.f, 0.f, 0.f};
191 float actual_outputs2[] = {0.f};
192 float actual_outputs3[] = {0.f, 0.f, 0.f, 0.f, 0.f};
193 float* all_actual_outputs[] = {actual_outputs1, actual_outputs2,
194 actual_outputs3};
195
196 for (int i = 0; i < 3; i++) {
197 int input_dims[] = {sizes[i]};
198 float* inputs = all_inputs[i];
199 ASSERT_EQ(
200 TfLiteSignatureRunnerResizeInputTensor(add_runner, "x", input_dims, 1),
201 kTfLiteOk);
202 ASSERT_EQ(TfLiteSignatureRunnerAllocateTensors(add_runner), kTfLiteOk);
203 ASSERT_NE(input_tensor, NULL);
204 ASSERT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
205 ASSERT_EQ(TfLiteTensorNumDims(input_tensor), 1);
206 ASSERT_EQ(TfLiteTensorDim(input_tensor, 0), sizes[i]);
207 ASSERT_EQ(TfLiteTensorByteSize(input_tensor), sizes[i] * sizeof(float));
208 ASSERT_NE(TfLiteTensorData(input_tensor), NULL);
209 ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, inputs,
210 sizes[i] * sizeof(float)),
211 kTfLiteOk);
212 ASSERT_EQ(TfLiteSignatureRunnerInvoke(add_runner), kTfLiteOk);
213 ASSERT_NE(output_tensor, NULL);
214 ASSERT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
215 ASSERT_EQ(TfLiteTensorNumDims(output_tensor), 1);
216 ASSERT_EQ(TfLiteTensorDim(output_tensor, 0), sizes[i]);
217 ASSERT_EQ(TfLiteTensorByteSize(output_tensor), sizes[i] * sizeof(float));
218 ASSERT_NE(TfLiteTensorData(output_tensor), NULL);
219 float* actual_outputs = all_actual_outputs[i];
220 ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, actual_outputs,
221 sizes[i] * sizeof(float)),
222 kTfLiteOk);
223 for (int j = 0; j < sizes[i]; j++) {
224 ASSERT_EQ(actual_outputs[j], inputs[j] + 2);
225 }
226 }
227
228 TfLiteSignatureRunnerDelete(add_runner);
229 TfLiteInterpreterDelete(interpreter);
230 TfLiteModelDelete(model);
231 }
232
TestInferenceUsingInterpreter(void)233 static void TestInferenceUsingInterpreter(void) {
234 TfLiteModel* model =
235 TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
236 ASSERT_NE(model, NULL);
237
238 TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
239 ASSERT_NE(options, NULL);
240 TfLiteInterpreterOptionsSetNumThreads(options, 2);
241
242 TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
243 ASSERT_NE(interpreter, NULL);
244
245 // The options can be deleted immediately after interpreter creation.
246 TfLiteInterpreterOptionsDelete(options);
247
248 ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
249 ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
250 ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
251
252 int input_dims[1] = {2};
253 ASSERT_EQ(TfLiteInterpreterResizeInputTensor(interpreter, 0, input_dims, 1),
254 kTfLiteOk);
255 ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
256
257 TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
258 ASSERT_NE(input_tensor, NULL);
259 ASSERT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
260 ASSERT_EQ(TfLiteTensorNumDims(input_tensor), 1);
261 ASSERT_EQ(TfLiteTensorDim(input_tensor, 0), 2);
262 ASSERT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2);
263 ASSERT_NE(TfLiteTensorData(input_tensor), NULL);
264 ASSERT_STREQ(TfLiteTensorName(input_tensor), "input");
265
266 TfLiteQuantizationParams input_params =
267 TfLiteTensorQuantizationParams(input_tensor);
268 ASSERT_EQ(input_params.scale, 0.f);
269 ASSERT_EQ(input_params.zero_point, 0);
270
271 float input[2] = {1.f, 3.f};
272 ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input, 2 * sizeof(float)),
273 kTfLiteOk);
274
275 ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
276
277 const TfLiteTensor* output_tensor =
278 TfLiteInterpreterGetOutputTensor(interpreter, 0);
279 ASSERT_NE(output_tensor, NULL);
280 ASSERT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
281 ASSERT_EQ(TfLiteTensorNumDims(output_tensor), 1);
282 ASSERT_EQ(TfLiteTensorDim(output_tensor, 0), 2);
283 ASSERT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2);
284 ASSERT_NE(TfLiteTensorData(output_tensor), NULL);
285 ASSERT_STREQ(TfLiteTensorName(output_tensor), "output");
286
287 TfLiteQuantizationParams output_params =
288 TfLiteTensorQuantizationParams(output_tensor);
289 ASSERT_EQ(output_params.scale, 0.f);
290 ASSERT_EQ(output_params.zero_point, 0);
291
292 float output[2];
293 ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output, 2 * sizeof(float)),
294 kTfLiteOk);
295 ASSERT_EQ(output[0], 3.f);
296 ASSERT_EQ(output[1], 9.f);
297
298 TfLiteInterpreterDelete(interpreter);
299
300 // The model should only be deleted after destroying the interpreter.
301 TfLiteModelDelete(model);
302 }
303
// Runs every test in this file, in order. Each test aborts the process (via
// CHECK) on the first failure, so reaching the end means all checks passed.
static void RunTests(void) {
  TestVersion();
  TestInferenceUsingSignature();
  TestRepeatResizeInputTensor();
  TestInferenceUsingInterpreter();
}
310
// Entry point: returns 0 on success; a failing CHECK aborts before this
// returns.
int main(void) {
  RunTests();
  return 0;
}
315