/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15
16 #include "tensorflow/lite/micro/micro_interpreter.h"
17
18 #include <cstdint>
19
20 #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
21 #include "tensorflow/lite/micro/all_ops_resolver.h"
22 #include "tensorflow/lite/micro/compatibility.h"
23 #include "tensorflow/lite/micro/micro_error_reporter.h"
24 #include "tensorflow/lite/micro/micro_profiler.h"
25 #include "tensorflow/lite/micro/micro_utils.h"
26 #include "tensorflow/lite/micro/recording_micro_allocator.h"
27 #include "tensorflow/lite/micro/test_helpers.h"
28 #include "tensorflow/lite/micro/testing/micro_test.h"
29
30 namespace tflite {
31 namespace {
32
33 class MockProfiler : public MicroProfiler {
34 public:
MockProfiler()35 MockProfiler() : event_starts_(0), event_ends_(0) {}
36
BeginEvent(const char * tag)37 uint32_t BeginEvent(const char* tag) override {
38 event_starts_++;
39 return 0;
40 }
41
EndEvent(uint32_t event_handle)42 void EndEvent(uint32_t event_handle) override { event_ends_++; }
43
event_starts()44 int event_starts() { return event_starts_; }
event_ends()45 int event_ends() { return event_ends_; }
46
47 private:
48 int event_starts_;
49 int event_ends_;
50
51 TF_LITE_REMOVE_VIRTUAL_DELETE
52 };
53
54 } // namespace
55 } // namespace tflite
56
57 TF_LITE_MICRO_TESTS_BEGIN
58
// Runs a full allocate/invoke cycle on the simple mock model, checking tensor
// metadata, the computed outputs (input 21 -> both outputs 42), and that op
// kernels are freed when the interpreter goes out of scope.
TF_LITE_MICRO_TEST(TestInterpreter) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize = 2000;
  uint8_t arena[kArenaSize];

  // Scope the interpreter so its destructor runs before the final check.
  {
    tflite::MicroInterpreter interpreter(model, op_resolver, arena, kArenaSize,
                                         tflite::GetMicroErrorReporter());
    TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(2), interpreter.outputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());

    TfLiteTensor* input = interpreter.input(0);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), input->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input->data.i32);
    input->data.i32[0] = 21;

    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());

    // Both outputs are one-element int32 tensors that should hold 42.
    auto check_output = [&interpreter](int index) {
      TfLiteTensor* output = interpreter.output(index);
      TF_LITE_MICRO_EXPECT_NE(nullptr, output);
      TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
      TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
      TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
      TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), output->bytes);
      TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
      TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]);
    };
    check_output(0);
    check_output(1);
  }

  // The destructor must have released the custom op's kernel data.
  TF_LITE_MICRO_EXPECT_EQ(tflite::testing::MockCustom::freed_, true);
}
111
// Verifies multi-tenant support: several models sharing one allocator must
// reuse head (non-persistent) arena space, and every interpreter must still
// compute correct results afterwards.
TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) {
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  constexpr size_t kArenaSize = 8192;
  uint8_t arena[kArenaSize];

  size_t simple_head_bytes = 0;
  size_t complex_head_bytes = 0;

  // Measure the simple model's head usage on a dedicated allocator.
  {
    tflite::RecordingMicroAllocator* simple_allocator =
        tflite::RecordingMicroAllocator::Create(
            arena, kArenaSize, tflite::GetMicroErrorReporter());
    const tflite::Model* simple_model = tflite::testing::GetSimpleMockModel();
    tflite::MicroInterpreter simple_interpreter(
        simple_model, op_resolver, simple_allocator,
        tflite::GetMicroErrorReporter());
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, simple_interpreter.AllocateTensors());
    simple_head_bytes =
        simple_allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes();

    // Sanity-check that the model still works: 21 in, 42 out.
    TfLiteTensor* input = simple_interpreter.input(0);
    TfLiteTensor* output = simple_interpreter.output(0);
    input->data.i32[0] = 21;
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, simple_interpreter.Invoke());
    TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]);
  }

  // Shared allocator for all the interpreters below.
  tflite::RecordingMicroAllocator* allocator =
      tflite::RecordingMicroAllocator::Create(arena, kArenaSize,
                                              tflite::GetMicroErrorReporter());

  // First tenant: the complex model. Nothing to reuse yet, so this
  // establishes the baseline head usage of the shared allocator.
  const tflite::Model* model1 = tflite::testing::GetComplexMockModel();
  tflite::MicroInterpreter interpreter1(model1, op_resolver, allocator,
                                        tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter1.AllocateTensors());
  TfLiteTensor* input1 = interpreter1.input(0);
  TfLiteTensor* output1 = interpreter1.output(0);
  complex_head_bytes =
      allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes();

  // Second tenant: the simple model on the same allocator. Multi-tenant
  // support lets it reuse part of the already-reserved head space.
  const tflite::Model* model2 = tflite::testing::GetSimpleMockModel();
  tflite::MicroInterpreter interpreter2(model2, op_resolver, allocator,
                                        tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter2.AllocateTensors());
  TfLiteTensor* input2 = interpreter2.input(0);
  TfLiteTensor* output2 = interpreter2.output(0);
  // "1 + 1 < 2": sharing must cost no more than the sum of the two models.
  size_t multi_tenant_head_bytes =
      allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes();
  TF_LITE_MICRO_EXPECT_LE(multi_tenant_head_bytes,
                          complex_head_bytes + simple_head_bytes);

  // Both tenants must still produce correct results while coexisting.
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input1->type);
  input1->data.i32[0] = 10;
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter1.Invoke());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output1->type);
  TF_LITE_MICRO_EXPECT_EQ(10, output1->data.i32[0]);

  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input2->type);
  input2->data.i32[0] = 21;
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter2.Invoke());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output2->type);
  TF_LITE_MICRO_EXPECT_EQ(42, output2->data.i32[0]);

  // Third tenant: another complex model. Its head requirements are already
  // covered by the first tenant, so head usage must not grow at all.
  const tflite::Model* model3 = tflite::testing::GetComplexMockModel();
  tflite::MicroInterpreter interpreter3(model3, op_resolver, allocator,
                                        tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter3.AllocateTensors());
  TfLiteTensor* input3 = interpreter3.input(0);
  TfLiteTensor* output3 = interpreter3.output(0);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input3->type);
  input3->data.i32[0] = 10;
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter3.Invoke());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output3->type);
  TF_LITE_MICRO_EXPECT_EQ(10, output3->data.i32[0]);
  TF_LITE_MICRO_EXPECT_EQ(
      multi_tenant_head_bytes,
      allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes());
}
204
// Creates several interpreters that share one recording allocator and checks
// that kernel memory planning (scratch/persistent buffers requested by the
// stateful op) keeps working in a multi-tenant context.
TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) {
  const tflite::Model* model = tflite::testing::GetSimpleStatefulModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize = 4096;
  uint8_t arena[kArenaSize];

  tflite::RecordingMicroAllocator* allocator =
      tflite::RecordingMicroAllocator::Create(arena, kArenaSize,
                                              tflite::GetMicroErrorReporter());

  // Repeat with fresh interpreters on the same allocator to exercise the
  // multi-tenant path.
  for (int i = 0; i < 3; i++) {
    tflite::MicroInterpreter interpreter(model, op_resolver, allocator,
                                         tflite::GetMicroErrorReporter());
    TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(2), interpreter.outputs_size());

    TfLiteTensor* input = interpreter.input(0);
    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(3, input->dims->data[0]);
    input->data.uint8[0] = 2;
    input->data.uint8[1] = 3;
    input->data.uint8[2] = 1;

    // Median of {2, 3, 1} is 2; the op also counts its own invocations.
    uint8_t expected_median = 2;

    for (int expected_invokes = 1; expected_invokes <= 2; ++expected_invokes) {
      TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());
      TfLiteTensor* median = interpreter.output(0);
      TF_LITE_MICRO_EXPECT_EQ(expected_median, median->data.uint8[0]);
      TfLiteTensor* invoke_count = interpreter.output(1);
      TF_LITE_MICRO_EXPECT_EQ(expected_invokes, invoke_count->data.i32[0]);
    }
  }
}
253
// Fills every tensor with a non-zero marker, calls ResetVariableTensors(),
// and verifies that exactly the variable tensors were zeroed while all other
// tensors kept their contents.
TF_LITE_MICRO_TEST(TestVariableTensorReset) {
  const tflite::Model* model = tflite::testing::GetComplexMockModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize =
      3072 /* optimal arena size at the time of writing */ +
      16 /* alignment */ + 100 /* some headroom */;
  uint8_t arena[kArenaSize];
  tflite::MicroInterpreter interpreter(model, op_resolver, arena, kArenaSize,
                                       tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 2096 + 100);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());

  // Fill every buffer with a non-zero marker: 2 for variable tensors (these
  // should be cleared by the reset below) and 1 for everything else.
  for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
    TfLiteTensor* tensor = interpreter.tensor(i);
    const int num_elements = tflite::ElementCount(*tensor->dims);
    const int marker = tensor->is_variable ? 2 : 1;
    switch (tensor->type) {
      case kTfLiteInt32: {
        int32_t* data = tflite::GetTensorData<int32_t>(tensor);
        for (int k = 0; k < num_elements; ++k) {
          data[k] = static_cast<int32_t>(marker);
        }
        break;
      }
      case kTfLiteUInt8: {
        uint8_t* data = tflite::GetTensorData<uint8_t>(tensor);
        for (int k = 0; k < num_elements; ++k) {
          data[k] = static_cast<uint8_t>(marker);
        }
        break;
      }
      default:
        TF_LITE_MICRO_FAIL("Unsupported dtype");
    }
  }

  interpreter.ResetVariableTensors();

  // Variable tensors must now read back 0 (not the 2 written above); all
  // non-variable tensors must still hold their marker of 1.
  for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
    TfLiteTensor* tensor = interpreter.tensor(i);
    const int num_elements = tflite::ElementCount(*tensor->dims);
    const int expected = tensor->is_variable ? 0 : 1;
    switch (tensor->type) {
      case kTfLiteInt32: {
        int32_t* data = tflite::GetTensorData<int32_t>(tensor);
        for (int k = 0; k < num_elements; ++k) {
          TF_LITE_MICRO_EXPECT_EQ(expected, data[k]);
        }
        break;
      }
      case kTfLiteUInt8: {
        uint8_t* data = tflite::GetTensorData<uint8_t>(tensor);
        for (int k = 0; k < num_elements; ++k) {
          TF_LITE_MICRO_EXPECT_EQ(expected, data[k]);
        }
        break;
      }
      default:
        TF_LITE_MICRO_FAIL("Unsupported dtype");
    }
  }
}
328
329 // The interpreter initialization requires multiple steps and this test case
330 // ensures that simply creating and destructing an interpreter object is ok.
331 // b/147830765 has one example of a change that caused trouble for this simple
332 // case.
TF_LITE_MICRO_TEST(TestIncompleteInitialization)333 TF_LITE_MICRO_TEST(TestIncompleteInitialization) {
334 const tflite::Model* model = tflite::testing::GetComplexMockModel();
335 TF_LITE_MICRO_EXPECT_NE(nullptr, model);
336
337 tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
338
339 constexpr size_t allocator_buffer_size = 2048;
340 uint8_t allocator_buffer[allocator_buffer_size];
341
342 tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
343 allocator_buffer_size,
344 tflite::GetMicroErrorReporter());
345 }
346
347 // Test that an interpreter with a supplied profiler correctly calls the
348 // profiler each time an operator is invoked.
TF_LITE_MICRO_TEST(InterpreterWithProfilerShouldProfileOps)349 TF_LITE_MICRO_TEST(InterpreterWithProfilerShouldProfileOps) {
350 const tflite::Model* model = tflite::testing::GetComplexMockModel();
351 TF_LITE_MICRO_EXPECT_NE(nullptr, model);
352
353 tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
354
355 constexpr size_t allocator_buffer_size = 2048;
356 uint8_t allocator_buffer[allocator_buffer_size];
357 tflite::MockProfiler profiler;
358 tflite::MicroInterpreter interpreter(
359 model, op_resolver, allocator_buffer, allocator_buffer_size,
360 tflite::GetMicroErrorReporter(), &profiler);
361
362 TF_LITE_MICRO_EXPECT_EQ(profiler.event_starts(), 0);
363 TF_LITE_MICRO_EXPECT_EQ(profiler.event_ends(), 0);
364 TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
365 TF_LITE_MICRO_EXPECT_EQ(interpreter.Invoke(), kTfLiteOk);
366 #ifndef NDEBUG
367 TF_LITE_MICRO_EXPECT_EQ(profiler.event_starts(), 3);
368 TF_LITE_MICRO_EXPECT_EQ(profiler.event_ends(), 3);
369 #else // Profile events will not occur on release builds.
370 TF_LITE_MICRO_EXPECT_EQ(profiler.event_starts(), 0);
371 TF_LITE_MICRO_EXPECT_EQ(profiler.event_ends(), 0);
372 #endif
373 }
374
// When the arena is too small, Invoke() must fail cleanly and none of the
// per-model head allocations may have been recorded.
TF_LITE_MICRO_TEST(TestIncompleteInitializationAllocationsWithSmallArena) {
  const tflite::Model* model = tflite::testing::GetComplexMockModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize = 512;
  uint8_t arena[kArenaSize];

  tflite::RecordingMicroAllocator* allocator =
      tflite::RecordingMicroAllocator::Create(arena, kArenaSize,
                                              tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);

  tflite::MicroInterpreter interpreter(model, op_resolver, allocator,
                                       tflite::GetMicroErrorReporter());

  // Invoke triggers allocation internally; it must fail because the arena is
  // too small for this model.
  TF_LITE_MICRO_EXPECT_EQ(interpreter.Invoke(), kTfLiteError);

  TF_LITE_MICRO_EXPECT_EQ(
      static_cast<size_t>(192),
      allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes());

  // Ensure recorded allocations are all zero (the tail is ignored because
  // some internal structs are initialized from that space).
  const tflite::RecordedAllocationType kCheckedTypes[] = {
      tflite::RecordedAllocationType::kTfLiteEvalTensorData,
      tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData,
      tflite::RecordedAllocationType::kOpData,
  };
  for (tflite::RecordedAllocationType type : kCheckedTypes) {
    TF_LITE_MICRO_EXPECT_EQ(
        static_cast<size_t>(0),
        allocator->GetRecordedAllocation(type).used_bytes);
  }
}
419
// Constructing an interpreter alone must not allocate from the arena head;
// allocations should only appear once Invoke() runs.
TF_LITE_MICRO_TEST(TestInterpreterDoesNotAllocateUntilInvoke) {
  const tflite::Model* model = tflite::testing::GetComplexMockModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize = 1024 * 10;
  uint8_t arena[kArenaSize];

  tflite::RecordingMicroAllocator* allocator =
      tflite::RecordingMicroAllocator::Create(arena, kArenaSize,
                                              tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);

  tflite::MicroInterpreter interpreter(model, op_resolver, allocator,
                                       tflite::GetMicroErrorReporter());

  // Nothing should have been allocated yet (the tail is ignored because some
  // internal structs are initialized from that space).
  TF_LITE_MICRO_EXPECT_EQ(
      static_cast<size_t>(0),
      allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes());
  const tflite::RecordedAllocationType kCheckedTypes[] = {
      tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData,
      tflite::RecordedAllocationType::kTfLiteEvalTensorData,
      tflite::RecordedAllocationType::kOpData,
  };
  for (tflite::RecordedAllocationType type : kCheckedTypes) {
    TF_LITE_MICRO_EXPECT_EQ(
        static_cast<size_t>(0),
        allocator->GetRecordedAllocation(type).used_bytes);
  }

  TF_LITE_MICRO_EXPECT_EQ(interpreter.Invoke(), kTfLiteOk);
  allocator->PrintAllocations();

  // Exact sizes are platform-dependent, so only check that allocations have
  // become non-zero after Invoke().
  TF_LITE_MICRO_EXPECT_GT(
      allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes(),
      static_cast<size_t>(0));
  TF_LITE_MICRO_EXPECT_GT(
      allocator
          ->GetRecordedAllocation(
              tflite::RecordedAllocationType::kTfLiteEvalTensorData)
          .used_bytes,
      0);

  TF_LITE_MICRO_EXPECT_GT(
      allocator
          ->GetRecordedAllocation(
              tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData)
          .used_bytes,
      static_cast<size_t>(0));

  // TODO(b/160160549): This check is mostly meaningless right now because the
  // operator creation in our mock models is inconsistent. Revisit what
  // this check should be once the mock models are properly created.
  TF_LITE_MICRO_EXPECT_EQ(
      allocator->GetRecordedAllocation(tflite::RecordedAllocationType::kOpData)
          .used_bytes,
      static_cast<size_t>(0));
}
490
// Runs the three-input mock model end to end: 21 + 21 + 24 -> 66, and checks
// that the op's resources are freed when the interpreter is destroyed.
TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) {
  const tflite::Model* model = tflite::testing::GetSimpleMultipleInputsModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize = 2000;
  uint8_t arena[kArenaSize];

  // Scope the interpreter so its destructor runs before the final check.
  {
    tflite::MicroInterpreter interpreter(model, op_resolver, arena, kArenaSize,
                                         tflite::GetMicroErrorReporter());

    TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);

    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(3), interpreter.inputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());

    TfLiteTensor* input0 = interpreter.input(0);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input0);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input0->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input0->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input0->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), input0->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input0->data.i32);
    input0->data.i32[0] = 21;

    TfLiteTensor* input1 = interpreter.input(1);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input1);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input1->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input1->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input1->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), input1->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input1->data.i32);
    // NOTE(review): this tensor is declared kTfLiteInt8 with bytes == 1, yet
    // it is written through data.i32 (4 bytes). The expected sum of 66 only
    // works if the mock op also reads it as i32, but the write appears to
    // exceed the 1-byte buffer — confirm against the MultipleInputs kernel
    // in test_helpers before changing either side.
    input1->data.i32[0] = 21;

    TfLiteTensor* input2 = interpreter.input(2);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input2);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input2->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input2->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input2->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), input2->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input2->data.i32);
    input2->data.i32[0] = 24;

    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());

    TfLiteTensor* output = interpreter.output(0);
    TF_LITE_MICRO_EXPECT_NE(nullptr, output);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), output->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
    TF_LITE_MICRO_EXPECT_EQ(66, output->data.i32[0]);
  }

  // The destructor must have released the op's resources.
  TF_LITE_MICRO_EXPECT_EQ(tflite::testing::MultipleInputs::freed_, true);
}
554
555 TF_LITE_MICRO_TESTS_END
556