/*
 * Copyright (c) 2019-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/schema/schema_generated.h"

#include "arm_profiler.hpp"
#ifdef LAYER_BY_LAYER_PROFILER
#include "layer_by_layer_profiler.hpp"
#endif
#include "ethosu_log.h"

#include "inference_process.hpp"

#include "cmsis_compiler.h"

#include <inttypes.h>

using namespace std;

namespace {

void tflu_debug_log(const char *s) {
    LOG("%s", s);
}

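// Table-driven CRC-32 with the reflected polynomial 0xedb88320 (the same
// variant used by zlib and PNG). The 256-entry lookup table is computed at
// compile time by the constexpr constructor, so no table is built at runtime.
// Minimal usage sketch:
//
//     constexpr auto crc = Crc();
//     uint32_t checksum = crc.crc32(buffer, length);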
class Crc {
public:
    constexpr Crc() : table() {
        uint32_t poly = 0xedb88320;

        for (uint32_t i = 0; i < 256; i++) {
            uint32_t crc = i;

            for (int j = 0; j < 8; j++) {
                if (crc & 1) {
                    crc = poly ^ (crc >> 1);
                } else {
                    crc >>= 1;
                }
            }

            table[i] = crc;
        }
    }

    uint32_t crc32(const void *data, const size_t length, uint32_t init = 0) const {
        uint32_t crc = init ^ 0xffffffff;

        const uint8_t *v = static_cast<const uint8_t *>(data);

        for (size_t i = 0; i < length; i++) {
            crc = table[(crc ^ v[i]) & 0xff] ^ (crc >> 8);
        }

        return crc ^ 0xffffffff;
    }

private:
    uint32_t table[256];
};

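// Prints an output tensor as a JSON-like record with its dimensions, data
// address, CRC-32 of the full tensor and up to bytesToPrint bytes of payload,
// e.g. (values illustrative):
//
//     {
//     "dims": [4,1,1,1,10],
//     "data_address": "70003340",
//     "crc32": "76e56c53",
//     "data":"0x00,0x00,..."
//     }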
void print_output_data(TfLiteTensor *output, size_t bytesToPrint) {
    constexpr auto crc = Crc();
    const uint32_t output_crc32 = crc.crc32(output->data.data, output->bytes);
    const int numBytesToPrint = min(output->bytes, bytesToPrint);
    int dims_size = output->dims->size;
    LOG("{\n");
    LOG("\"dims\": [%d,", dims_size);
    for (int i = 0; i < output->dims->size - 1; ++i) {
        LOG("%d,", output->dims->data[i]);
    }
    LOG("%d],\n", output->dims->data[dims_size - 1]);
    LOG("\"data_address\": \"%08" PRIx32 "\",\n", (uint32_t)output->data.data);
    if (numBytesToPrint) {
        LOG("\"crc32\": \"%08" PRIx32 "\",\n", output_crc32);
        LOG("\"data\":\"");
        for (int i = 0; i < numBytesToPrint - 1; ++i) {
            /*
             * Workaround for an issue when compiling with GCC, where
             * printing a lone '\n' corrupts the overall output.
             */
            if (i % 15 == 0 && i != 0) {
                LOG("0x%02x,\n", output->data.uint8[i]);
            } else {
                LOG("0x%02x,", output->data.uint8[i]);
            }
        }
        LOG("0x%02x\"\n", output->data.uint8[numBytesToPrint - 1]);
    } else {
        LOG("\"crc32\": \"%08" PRIx32 "\"\n", output_crc32);
    }
    LOG("}");
}

bool copyOutput(const TfLiteTensor &src, InferenceProcess::DataPtr &dst) {
    if (dst.data == nullptr) {
        return false;
    }

    if (src.bytes > dst.size) {
        LOG_ERR("Tensor size mismatch (bytes): actual=%zu, expected=%zu.", src.bytes, dst.size);
        return true;
    }

    copy(src.data.uint8, src.data.uint8 + src.bytes, static_cast<uint8_t *>(dst.data));
    dst.size = src.bytes;

    return false;
}

} // namespace

namespace InferenceProcess {
DataPtr::DataPtr(void *_data, size_t _size) : data(_data), size(_size) {}

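// Cache maintenance for buffers shared with the NPU: on cores with a data
// cache (__DCACHE_PRESENT), clean() writes CPU-side data back to memory before
// the NPU reads it, and invalidate() discards stale cache lines after the NPU
// has written, so that the CPU sees the new data.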
void DataPtr::invalidate() {
#if defined(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
    SCB_InvalidateDCache_by_Addr(reinterpret_cast<uint32_t *>(data), size);
#endif
}

void DataPtr::clean() {
#if defined(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
    SCB_CleanDCache_by_Addr(reinterpret_cast<uint32_t *>(data), size);
#endif
}

InferenceJob::InferenceJob() : numBytesToPrint(0) {}

InferenceJob::InferenceJob(const string &_name,
                           const DataPtr &_networkModel,
                           const vector<DataPtr> &_input,
                           const vector<DataPtr> &_output,
                           const vector<DataPtr> &_expectedOutput,
                           size_t _numBytesToPrint,
                           const vector<uint8_t> &_pmuEventConfig,
                           const uint32_t _pmuCycleCounterEnable) :
    name(_name),
    networkModel(_networkModel), input(_input), output(_output), expectedOutput(_expectedOutput),
    numBytesToPrint(_numBytesToPrint), pmuEventConfig(_pmuEventConfig), pmuCycleCounterEnable(_pmuCycleCounterEnable),
    pmuEventCount(), pmuCycleCounterCount(0) {}

void InferenceJob::invalidate() {
    networkModel.invalidate();

    for (auto &it : input) {
        it.invalidate();
    }

    for (auto &it : output) {
        it.invalidate();
    }

    for (auto &it : expectedOutput) {
        it.invalidate();
    }
}

void InferenceJob::clean() {
    networkModel.clean();

    for (auto &it : input) {
        it.clean();
    }

    for (auto &it : output) {
        it.clean();
    }

    for (auto &it : expectedOutput) {
        it.clean();
    }
}

// NOTE: Adding code for get_lock & free_lock with some corrections from
// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHEJCHB.html
// TODO: check correctness?
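//
// Spinlock built on the Arm exclusive monitor: __LDREXW marks the start of an
// exclusive access, __STREXW returns 0 only if nothing else has touched the
// lock in between, and __DMB keeps accesses in the critical section from
// being reordered across the lock boundary.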
void InferenceProcess::getLock() {
    int status = 0;

    do {
        // Wait until lock_var is free
        while (__LDREXW(&lock) != 0)
            ;

        // Try to set lock_var
        status = __STREXW(1, &lock);
    } while (status != 0);

    // Do not start any other memory access until memory barrier is completed
    __DMB();
}

// TODO: check correctness?
void InferenceProcess::freeLock() {
    // Ensure memory operations completed before releasing lock
    __DMB();

    lock = 0;
}

bool InferenceProcess::push(const InferenceJob &job) {
    getLock();
    inferenceJobQueue.push(job);
    freeLock();

    return true;
}

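// Runs a single inference job. Returns true on failure and false on success,
// matching the convention used by run() below.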
bool InferenceProcess::runJob(InferenceJob &job) {
    LOG_INFO("Running inference job: %s", job.name.c_str());

    // Register debug log callback for profiling
    RegisterDebugLogCallback(tflu_debug_log);

    tflite::MicroErrorReporter microErrorReporter;
    tflite::ErrorReporter *reporter = &microErrorReporter;

    // Get model handle and verify that the version is correct
    const tflite::Model *model = ::tflite::GetModel(job.networkModel.data);
    if (model->version() != TFLITE_SCHEMA_VERSION) {
        LOG_ERR("Model schema version unsupported: version=%" PRIu32 ", supported=%d.",
                model->version(),
                TFLITE_SCHEMA_VERSION);
        return true;
    }

    // Create the TFL micro interpreter
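    // AllOpsResolver registers every builtin operator; production code would
    // typically use a MicroMutableOpResolver with only the required ops to
    // reduce code size.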
    tflite::AllOpsResolver resolver;
#ifdef LAYER_BY_LAYER_PROFILER
    tflite::LayerByLayerProfiler profiler;
#else
    tflite::ArmProfiler profiler;
#endif

    tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, reporter, nullptr, &profiler);

    // Allocate tensors
    TfLiteStatus allocate_status = interpreter.AllocateTensors();
    if (allocate_status != kTfLiteOk) {
        LOG_ERR("Failed to allocate tensors for inference: job=%s", job.name.c_str());
        return true;
    }

    // Create a filtered list of non-empty input tensors
    vector<TfLiteTensor *> inputTensors;
    for (size_t i = 0; i < interpreter.inputs_size(); ++i) {
        TfLiteTensor *tensor = interpreter.input(i);

        if (tensor->bytes > 0) {
            inputTensors.push_back(tensor);
        }
    }
    if (job.input.size() != inputTensors.size()) {
        LOG_ERR("Number of input buffers does not match number of non-empty network tensors: input=%zu, network=%zu",
                job.input.size(),
                inputTensors.size());
        return true;
    }

    // Copy input data
    for (size_t i = 0; i < inputTensors.size(); ++i) {
        const DataPtr &input = job.input[i];
        const TfLiteTensor *tensor = inputTensors[i];

        if (input.size != tensor->bytes) {
            LOG_ERR("Job input size does not match network input size: job=%s, index=%zu, input=%zu, network=%zu",
                    job.name.c_str(),
                    i,
                    input.size,
                    tensor->bytes);
            return true;
        }

        copy(static_cast<char *>(input.data), static_cast<char *>(input.data) + input.size, tensor->data.uint8);
    }

    // Run the inference
    TfLiteStatus invoke_status = interpreter.Invoke();
    if (invoke_status != kTfLiteOk) {
        LOG_ERR("Invoke failed for inference: job=%s", job.name.c_str());
        return true;
    }

    LOG("arena_used_bytes : %zu\n", interpreter.arena_used_bytes());

    LOG("Inference runtime: %u cycles\n", (unsigned int)profiler.GetTotalTicks());

    if (job.pmuCycleCounterEnable != 0) {
        job.pmuCycleCounterCount = profiler.GetTotalTicks();
    }

    // Copy output data
    if (job.output.size() > 0) {
        if (interpreter.outputs_size() != job.output.size()) {
            LOG_ERR("Output size mismatch: job=%zu, network=%zu", job.output.size(), interpreter.outputs_size());
            return true;
        }

        for (unsigned i = 0; i < interpreter.outputs_size(); ++i) {
            if (copyOutput(*interpreter.output(i), job.output[i])) {
                return true;
            }
        }
    }

    // Print the output shape and the output data, truncated to the first
    // job.numBytesToPrint bytes if the tensor is larger than that.
    LOG("num_of_outputs: %zu\n", interpreter.outputs_size());
    LOG("output_begin\n");
    LOG("[\n");
    for (unsigned int i = 0; i < interpreter.outputs_size(); i++) {
        TfLiteTensor *output = interpreter.output(i);
        print_output_data(output, job.numBytesToPrint);
        if (i != interpreter.outputs_size() - 1) {
            LOG(",\n");
        }
    }
    LOG("]\n");
    LOG("output_end\n");

    if (job.expectedOutput.size() > 0) {
        if (job.expectedOutput.size() != interpreter.outputs_size()) {
            LOG_ERR("Expected number of output tensors mismatch: job=%s, expected=%zu, network=%zu",
                    job.name.c_str(),
                    job.expectedOutput.size(),
                    interpreter.outputs_size());
            return true;
        }

        for (unsigned int i = 0; i < interpreter.outputs_size(); i++) {
            const DataPtr &expected = job.expectedOutput[i];
            const TfLiteTensor *output = interpreter.output(i);

            if (expected.size != output->bytes) {
                LOG_ERR("Expected output tensor size mismatch: job=%s, index=%u, expected=%zu, network=%zu",
                        job.name.c_str(),
                        i,
                        expected.size,
                        output->bytes);
                return true;
            }

            for (unsigned int j = 0; j < output->bytes; ++j) {
                if (output->data.uint8[j] != static_cast<uint8_t *>(expected.data)[j]) {
                    LOG_ERR("Expected output tensor data mismatch: job=%s, index=%u, offset=%u, "
                            "expected=%02x, network=%02x",
                            job.name.c_str(),
                            i,
                            j,
                            static_cast<uint8_t *>(expected.data)[j],
                            output->data.uint8[j]);
                    return true;
                }
            }
        }
    }

    LOG_INFO("Finished running job: %s", job.name.c_str());

    return false;
}

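// Drains the inference job queue, running one job at a time. With
// exitOnEmpty=false the loop spins waiting for new jobs and never returns;
// the caller is expected to push jobs from another context. Returns true if
// any job failed.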
bool InferenceProcess::run(bool exitOnEmpty) {
    bool anyJobFailed = false;

    while (true) {
        getLock();
        bool empty = inferenceJobQueue.empty();
        freeLock();

        if (empty) {
            if (exitOnEmpty) {
                LOG_INFO("Exit from InferenceProcess::run() due to empty job queue");
                break;
            }

            continue;
        }

        getLock();
        InferenceJob job = inferenceJobQueue.front();
        inferenceJobQueue.pop();
        freeLock();

        if (runJob(job)) {
            anyJobFailed = true;
            continue;
        }
    }

    return anyJobFailed;
}
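
// Typical usage (a sketch; assumes an InferenceProcess instance `process`
// constructed elsewhere with a tensor arena, see inference_process.hpp, and
// application-provided model/input/output buffers):
//
//     InferenceProcess::DataPtr networkModel(modelData, modelSize);
//     InferenceProcess::DataPtr input(inputData, inputSize);
//     InferenceProcess::DataPtr output(outputData, outputSize);
//     InferenceProcess::InferenceJob job("job0", networkModel, {input}, {output}, {}, 0, {}, 0);
//     process.push(job);
//     bool failed = process.run(true);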

} // namespace InferenceProcess