/*
 * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "UseCaseHandler.hpp"

#include "InputFiles.hpp"
#include "Classifier.hpp"
#include "MicroNetKwsModel.hpp"
#include "hal.h"
#include "MicroNetKwsMfcc.hpp"
#include "AudioUtils.hpp"
#include "ImageUtils.hpp"
#include "UseCaseCommonUtils.hpp"
#include "KwsResult.hpp"
#include "log_macros.h"

#include <vector>
#include <functional>
#include <cstring>      /* For std::memcpy. */
#include <cinttypes>    /* For PRIu32 used in logging. */

using KwsClassifier = arm::app::Classifier;

namespace arm {
namespace app {

    /**
     * @brief       Presents KWS inference results on the LCD and via logging.
     * @param[in]   results     Vector of classification results to be displayed.
     * @return      true if successful, false otherwise.
     **/
    static bool PresentInferenceResult(const std::vector<arm::app::kws::KwsResult>& results);

    /**
     * @brief Returns a function to perform feature calculation and populate the input tensor
     *        with MFCC data.
     *
     * The input tensor data type is checked to choose the correct MFCC feature data type.
     * If the tensor has an integer data type then the original features are quantised.
     *
     * Warning: The MFCC calculator provided as input must have the same lifetime as the returned function.
     *
     * @param[in]       mfcc          MFCC feature calculator.
     * @param[in,out]   inputTensor   Input tensor pointer to store calculated features.
     * @param[in]       cacheSize     Size of the feature vectors cache (number of feature vectors).
     * @return          Function to be called providing audio sample and sliding window index.
     */
    static std::function<void (std::vector<int16_t>&, int, bool, size_t)>
        GetFeatureCalculator(audio::MicroNetKwsMFCC& mfcc,
                             TfLiteTensor* inputTensor,
                             size_t cacheSize);

    /* Audio inference handler. */
    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
    {
        auto& profiler = ctx.Get<Profiler&>("profiler");

        constexpr uint32_t dataPsnTxtInfStartX = 20;
        constexpr uint32_t dataPsnTxtInfStartY = 40;
        constexpr int minTensorDims = static_cast<int>(
            (arm::app::MicroNetKwsModel::ms_inputRowsIdx > arm::app::MicroNetKwsModel::ms_inputColsIdx)?
             arm::app::MicroNetKwsModel::ms_inputRowsIdx : arm::app::MicroNetKwsModel::ms_inputColsIdx);

        auto& model = ctx.Get<Model&>("model");

        /* If the request has a valid size, set the audio index. */
        if (clipIndex < NUMBER_OF_FILES) {
            if (!SetAppCtxIfmIdx(ctx, clipIndex, "clipIndex")) {
                return false;
            }
        }
        if (!model.IsInited()) {
            printf_err("Model is not initialised! Terminating processing.\n");
            return false;
        }

        const auto frameLength = ctx.Get<int>("frameLength");
        const auto frameStride = ctx.Get<int>("frameStride");
        const auto scoreThreshold = ctx.Get<float>("scoreThreshold");
        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");

        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
        TfLiteTensor* inputTensor = model.GetInputTensor(0);

        if (!inputTensor->dims) {
            printf_err("Invalid input tensor dims\n");
            return false;
        } else if (inputTensor->dims->size < minTensorDims) {
            printf_err("Input tensor dimension should be >= %d\n", minTensorDims);
            return false;
        }

        TfLiteIntArray* inputShape = model.GetInputShape(0);
        const uint32_t kNumCols = inputShape->data[arm::app::MicroNetKwsModel::ms_inputColsIdx];
        const uint32_t kNumRows = inputShape->data[arm::app::MicroNetKwsModel::ms_inputRowsIdx];

        audio::MicroNetKwsMFCC mfcc = audio::MicroNetKwsMFCC(kNumCols, frameLength);
        mfcc.Init();

        /* Deduce the data length required for 1 inference from the network parameters. */
        auto audioDataWindowSize = kNumRows * frameStride + (frameLength - frameStride);
        auto mfccWindowSize = frameLength;
        auto mfccWindowStride = frameStride;

        /* We choose to move by half the window size => for a 1 second window size
         * there is an overlap of 0.5 seconds. */
        auto audioDataStride = audioDataWindowSize / 2;

        /* To make the previously calculated features re-usable, the stride must be a multiple
         * of the MFCC feature window stride. */
        if (0 != audioDataStride % mfccWindowStride) {
            /* Reduce the stride. */
            audioDataStride -= audioDataStride % mfccWindowStride;
        }

        auto nMfccVectorsInAudioStride = audioDataStride / mfccWindowStride;
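
        /* As an illustration only: with the typical MicroNet KWS MFCC settings of
         * frameLength = 640, frameStride = 320 and a 49-row input (16 kHz audio), this gives
         * audioDataWindowSize = 49 * 320 + (640 - 320) = 16000 samples (1 second),
         * audioDataStride = 8000 (already a multiple of 320), and
         * nMfccVectorsInAudioStride = 8000 / 320 = 25 new feature vectors per stride. */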

        /* We expect to be sampling 1 second worth of data at a time.
         * NOTE: This is only used for time stamp calculation. */
        const float secondsPerSample = 1.0 / audio::MicroNetKwsMFCC::ms_defaultSamplingFreq;

        do {
            hal_lcd_clear(COLOR_BLACK);

            auto currentIndex = ctx.Get<uint32_t>("clipIndex");

            /* Creating an MFCC features sliding window for the data required for 1 inference. */
            auto audioMFCCWindowSlider = audio::SlidingWindow<const int16_t>(
                    get_audio_array(currentIndex),
                    audioDataWindowSize, mfccWindowSize,
                    mfccWindowStride);

            /* Creating a sliding window through the whole audio clip. */
            auto audioDataSlider = audio::SlidingWindow<const int16_t>(
                    get_audio_array(currentIndex),
                    get_audio_array_size(currentIndex),
                    audioDataWindowSize, audioDataStride);

            /* Calculate the number of feature vectors in the window overlap region.
             * These feature vectors will be reused. */
            auto numberOfReusedFeatureVectors = audioMFCCWindowSlider.TotalStrides() + 1
                                                - nMfccVectorsInAudioStride;
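
            /* Continuing the illustrative numbers above: the MFCC slider produces
             * TotalStrides() + 1 = 49 feature vectors per inference window, of which
             * 49 - 25 = 24 fall in the overlap with the next window and can be reused. */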

            /* Construct feature calculation function. */
            auto mfccFeatureCalc = GetFeatureCalculator(mfcc, inputTensor,
                                                        numberOfReusedFeatureVectors);

            if (!mfccFeatureCalc) {
                return false;
            }

            /* Declare a container for results. */
            std::vector<arm::app::kws::KwsResult> results;

            /* Display message on the LCD - inference running. */
            std::string str_inf{"Running inference... "};
            hal_lcd_display_text(
                    str_inf.c_str(), str_inf.size(),
                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
            info("Running inference on audio clip %" PRIu32 " => %s\n", currentIndex,
                 get_filename(currentIndex));

            /* Start sliding through the audio clip. */
            while (audioDataSlider.HasNext()) {
                const int16_t* inferenceWindow = audioDataSlider.Next();

                /* We moved to the next window - set the features sliding window to the new address. */
                audioMFCCWindowSlider.Reset(inferenceWindow);

                /* The first window does not have the cache ready. */
                bool useCache = audioDataSlider.Index() > 0 && numberOfReusedFeatureVectors > 0;

                /* Start calculating features inside one audio sliding window. */
                while (audioMFCCWindowSlider.HasNext()) {
                    const int16_t* mfccWindow = audioMFCCWindowSlider.Next();
                    std::vector<int16_t> mfccAudioData = std::vector<int16_t>(mfccWindow,
                                                                              mfccWindow + mfccWindowSize);
                    /* Compute features for this window and write them to the input tensor. */
                    mfccFeatureCalc(mfccAudioData,
                                    audioMFCCWindowSlider.Index(),
                                    useCache,
                                    nMfccVectorsInAudioStride);
                }

                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
                     audioDataSlider.TotalStrides() + 1);

                /* Run inference over this audio clip sliding window. */
                if (!RunInference(model, profiler)) {
                    return false;
                }

                std::vector<ClassificationResult> classificationResult;
                auto& classifier = ctx.Get<KwsClassifier&>("classifier");
                classifier.GetClassificationResults(outputTensor, classificationResult,
                        ctx.Get<std::vector<std::string>&>("labels"), 1, true);

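                /* Record this window's result together with its start time: window 'i' begins at
                 * sample i * audioDataStride, i.e. at i * audioDataStride * secondsPerSample seconds
                 * (0.5 s apart for the illustrative numbers above). */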
                results.emplace_back(kws::KwsResult(classificationResult,
                    audioDataSlider.Index() * secondsPerSample * audioDataStride,
                    audioDataSlider.Index(), scoreThreshold));

#if VERIFY_TEST_OUTPUT
                arm::app::DumpTensor(outputTensor);
#endif /* VERIFY_TEST_OUTPUT */
            } /* while (audioDataSlider.HasNext()) */

            /* Erase. */
            str_inf = std::string(str_inf.size(), ' ');
            hal_lcd_display_text(
                    str_inf.c_str(), str_inf.size(),
                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

            ctx.Set<std::vector<arm::app::kws::KwsResult>>("results", results);

            if (!PresentInferenceResult(results)) {
                return false;
            }

            profiler.PrintProfilingResult();

            IncrementAppCtxIfmIdx(ctx, "clipIndex");

        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);

        return true;
    }

    /**
     * @brief Generic feature calculator factory.
     *
     * Returns a lambda function that computes features, using a feature cache.
     * The actual feature computation is performed by the 'compute' function provided as a parameter.
     * Features are written directly to the input tensor memory.
     *
     * @tparam T                Feature vector type.
     * @param[in] inputTensor   Model input tensor pointer.
     * @param[in] cacheSize     Number of feature vectors to cache. Defined by the sliding window overlap.
     * @param[in] compute       Features calculator function.
     * @return    Lambda function to compute features.
     */
    template<class T>
    std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
    FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
                std::function<std::vector<T> (std::vector<int16_t>&)> compute)
    {
        /* Feature cache to be captured by the lambda function. */
        static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);

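        /* The returned lambda takes: the audio data for one MFCC frame, the index of the current
         * MFCC sliding window position, whether cached features may be reused, and the number of
         * newly computed feature vectors per audio stride (the point at which cache renewal
         * starts). Features for frame 'index' are written at offset index * featureSize into the
         * input tensor. */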
        return [=](std::vector<int16_t>& audioDataWindow,
                   size_t index,
                   bool useCache,
                   size_t featuresOverlapIndex)
        {
            T* tensorData = tflite::GetTensorData<T>(inputTensor);
            std::vector<T> features;

            /* Reuse features from the cache if the cache is ready and the sliding windows overlap.
             * The overlap is at the beginning of the sliding window, with a size of the feature cache. */
            if (useCache && index < featureCache.size()) {
                features = std::move(featureCache[index]);
            } else {
                features = std::move(compute(audioDataWindow));
            }
            auto size = features.size();
            auto sizeBytes = sizeof(T) * size;
            std::memcpy(tensorData + (index * size), features.data(), sizeBytes);

            /* Start renewing the cache as soon as the iteration moves past the window overlap. */
            if (index >= featuresOverlapIndex) {
                featureCache[index - featuresOverlapIndex] = std::move(features);
            }
        };
    }

    static bool PresentInferenceResult(const std::vector<arm::app::kws::KwsResult>& results)
    {
        constexpr uint32_t dataPsnTxtStartX1 = 20;
        constexpr uint32_t dataPsnTxtStartY1 = 30;
        constexpr uint32_t dataPsnTxtYIncr = 16; /* Row index increment. */

        hal_lcd_set_text_color(COLOR_GREEN);
        info("Final results:\n");
        info("Total number of inferences: %zu\n", results.size());

        /* Display each result. */
        uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;

        for (uint32_t i = 0; i < results.size(); ++i) {

            std::string topKeyword{"<none>"};
            float score = 0.f;
            if (!results[i].m_resultVec.empty()) {
                topKeyword = results[i].m_resultVec[0].m_label;
                score = results[i].m_resultVec[0].m_normalisedVal;
            }

            std::string resultStr =
                std::string{"@"} + std::to_string(results[i].m_timeStamp) +
                std::string{"s: "} + topKeyword + std::string{" ("} +
                std::to_string(static_cast<int>(score * 100)) + std::string{"%)"};

            hal_lcd_display_text(
                    resultStr.c_str(), resultStr.size(),
                    dataPsnTxtStartX1, rowIdx1, false);
            rowIdx1 += dataPsnTxtYIncr;

            if (results[i].m_resultVec.empty()) {
                info("For timestamp: %f (inference #: %" PRIu32
                     "); label: %s; threshold: %f\n",
                     results[i].m_timeStamp, results[i].m_inferenceNumber,
                     topKeyword.c_str(),
                     results[i].m_threshold);
            } else {
                for (uint32_t j = 0; j < results[i].m_resultVec.size(); ++j) {
                    info("For timestamp: %f (inference #: %" PRIu32
                         "); label: %s, score: %f; threshold: %f\n",
                         results[i].m_timeStamp,
                         results[i].m_inferenceNumber,
                         results[i].m_resultVec[j].m_label.c_str(),
                         results[i].m_resultVec[j].m_normalisedVal,
                         results[i].m_threshold);
                }
            }
        }

        return true;
    }

    template std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
        FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
                            size_t cacheSize,
                            std::function<std::vector<int8_t> (std::vector<int16_t>&)> compute);

    template std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
        FeatureCalc<uint8_t>(TfLiteTensor* inputTensor,
                             size_t cacheSize,
                             std::function<std::vector<uint8_t> (std::vector<int16_t>&)> compute);

    template std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
        FeatureCalc<int16_t>(TfLiteTensor* inputTensor,
                             size_t cacheSize,
                             std::function<std::vector<int16_t> (std::vector<int16_t>&)> compute);

    template std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
        FeatureCalc<float>(TfLiteTensor* inputTensor,
                           size_t cacheSize,
                           std::function<std::vector<float> (std::vector<int16_t>&)> compute);


    static std::function<void (std::vector<int16_t>&, int, bool, size_t)>
    GetFeatureCalculator(audio::MicroNetKwsMFCC& mfcc, TfLiteTensor* inputTensor, size_t cacheSize)
    {
        std::function<void (std::vector<int16_t>&, size_t, bool, size_t)> mfccFeatureCalc;

        TfLiteQuantization quant = inputTensor->quantization;

        if (kTfLiteAffineQuantization == quant.type) {

            auto* quantParams = (TfLiteAffineQuantization*) quant.params;
            const float quantScale = quantParams->scale->data[0];
            const int quantOffset = quantParams->zero_point->data[0];
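
            /* With affine quantisation, a real value r maps to a quantised value q as
             * q = (r / scale) + zeroPoint, so the MFCC features computed in floating point
             * are converted into the integer range expected by the input tensor. */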
            switch (inputTensor->type) {
                case kTfLiteInt8: {
                    mfccFeatureCalc = FeatureCalc<int8_t>(inputTensor,
                                          cacheSize,
                                          [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
                                              return mfcc.MfccComputeQuant<int8_t>(audioDataWindow,
                                                                                   quantScale,
                                                                                   quantOffset);
                                          }
                    );
                    break;
                }
                case kTfLiteUInt8: {
                    mfccFeatureCalc = FeatureCalc<uint8_t>(inputTensor,
                                          cacheSize,
                                          [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
                                              return mfcc.MfccComputeQuant<uint8_t>(audioDataWindow,
                                                                                    quantScale,
                                                                                    quantOffset);
                                          }
                    );
                    break;
                }
                case kTfLiteInt16: {
                    mfccFeatureCalc = FeatureCalc<int16_t>(inputTensor,
                                          cacheSize,
                                          [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
                                              return mfcc.MfccComputeQuant<int16_t>(audioDataWindow,
                                                                                    quantScale,
                                                                                    quantOffset);
                                          }
                    );
                    break;
                }
                default:
                    printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
            }

        } else {
            mfccFeatureCalc = FeatureCalc<float>(inputTensor,
                                  cacheSize,
                                  [&mfcc](std::vector<int16_t>& audioDataWindow) {
                                      return mfcc.MfccCompute(audioDataWindow);
                                  });
        }
        return mfccFeatureCalc;
    }

} /* namespace app */
} /* namespace arm */