/*
 * Copyright (c) 2021 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "UseCaseHandler.hpp"

#include "InputFiles.hpp"
#include "AsrClassifier.hpp"
#include "Wav2LetterModel.hpp"
#include "hal.h"
#include "Wav2LetterMfcc.hpp"
#include "AudioUtils.hpp"
#include "UseCaseCommonUtils.hpp"
#include "AsrResult.hpp"
#include "Wav2LetterPreprocess.hpp"
#include "Wav2LetterPostprocess.hpp"
#include "OutputDecode.hpp"

#include <cinttypes>   /* PRIu32 */
#include <cmath>       /* ceilf */

namespace arm {
namespace app {

    /**
     * @brief           Helper function to increment current audio clip index.
     * @param[in,out]   ctx   Reference to the application context object.
     **/
    static void IncrementAppCtxClipIdx(ApplicationContext& ctx);

    /**
     * @brief           Helper function to set the audio clip index.
     * @param[in,out]   ctx   Reference to the application context object.
     * @param[in]       idx   Value to be set.
     * @return          true if index is set, false otherwise.
     **/
    static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);

    /**
     * @brief           Presents inference results using the data presentation
     *                  object.
     * @param[in]       platform    Reference to the hal platform object.
     * @param[in]       results     Vector of classification results to be displayed.
     * @return          true if successful, false otherwise.
     **/
    static bool PresentInferenceResult(
                    hal_platform& platform,
                    const std::vector<arm::app::asr::AsrResult>& results);
58
59 /* Audio inference classification handler. */
60 bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
61 {
62 constexpr uint32_t dataPsnTxtInfStartX = 20;
63 constexpr uint32_t dataPsnTxtInfStartY = 40;
64
65 auto& platform = ctx.Get<hal_platform&>("platform");
66 platform.data_psn->clear(COLOR_BLACK);
67
Isabella Gottardi8df12f32021-04-07 17:15:31 +010068 auto& profiler = ctx.Get<Profiler&>("profiler");
69
alexander3c798932021-03-26 21:42:19 +000070 /* If the request has a valid size, set the audio index. */
71 if (clipIndex < NUMBER_OF_FILES) {
alexanderc350cdc2021-04-29 20:36:09 +010072 if (!SetAppCtxClipIdx(ctx, clipIndex)) {
alexander3c798932021-03-26 21:42:19 +000073 return false;
74 }
75 }
76
77 /* Get model reference. */
78 auto& model = ctx.Get<Model&>("model");
79 if (!model.IsInited()) {
80 printf_err("Model is not initialised! Terminating processing.\n");
81 return false;
82 }
83
84 /* Get score threshold to be applied for the classifier (post-inference). */
85 auto scoreThreshold = ctx.Get<float>("scoreThreshold");
86
87 /* Get tensors. Dimensions of the tensor should have been verified by
88 * the callee. */
89 TfLiteTensor* inputTensor = model.GetInputTensor(0);
90 TfLiteTensor* outputTensor = model.GetOutputTensor(0);
91 const uint32_t inputRows = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];
92
93 /* Populate MFCC related parameters. */
94 auto mfccParamsWinLen = ctx.Get<uint32_t>("frameLength");
95 auto mfccParamsWinStride = ctx.Get<uint32_t>("frameStride");
96
97 /* Populate ASR inference context and inner lengths for input. */
98 auto inputCtxLen = ctx.Get<uint32_t>("ctxLen");
99 const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);
100
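        /* Note: the audio window below is sized to produce exactly inputRows MFCC
         * frames (the first frame consumes mfccParamsWinLen samples, each further
         * frame another mfccParamsWinStride). Because the stride skips only the
         * inputInnerLen inner frames, consecutive windows overlap by 2 * inputCtxLen
         * frames, which serve as left/right context for the model. */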
        /* Audio data stride corresponds to inputInnerLen feature vectors. */
        const uint32_t audioParamsWinLen = (inputRows - 1) * mfccParamsWinStride + (mfccParamsWinLen);
        const uint32_t audioParamsWinStride = inputInnerLen * mfccParamsWinStride;
        const float audioParamsSecondsPerSample = (1.0 / audio::Wav2LetterMFCC::ms_defaultSamplingFreq);

        /* Get pre/post-processing objects. */
        auto& prep = ctx.Get<audio::asr::Preprocess&>("preprocess");
        auto& postp = ctx.Get<audio::asr::Postprocess&>("postprocess");

        /* Set default reduction axis for post-processing. */
        const uint32_t reductionAxis = arm::app::Wav2LetterModel::ms_outputRowsIdx;

        /* Audio clip start index - used to stop after a full pass when running all clips. */
        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");

        /* Loop to process audio clips. */
        do {
            /* Get current audio clip index. */
            auto currentIndex = ctx.Get<uint32_t>("clipIndex");

            /* Get the current audio buffer and respective size. */
            const int16_t* audioArr = get_audio_array(currentIndex);
            const uint32_t audioArrSize = get_audio_array_size(currentIndex);

            if (!audioArr) {
                printf_err("Invalid audio array pointer\n");
                return false;
            }

            /* Audio clip must have enough samples to produce 1 MFCC feature. */
            if (audioArrSize < mfccParamsWinLen) {
                printf_err("Not enough audio samples, minimum needed is %" PRIu32 "\n",
                           mfccParamsWinLen);
                return false;
            }

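            /* A fractional sliding window is used so the clip does not need to
             * divide evenly into full windows; the final, shorter window is
             * handled by trimming inferenceWindowLen inside the loop below. */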
            /* Initialise an audio slider. */
            auto audioDataSlider = audio::FractionalSlidingWindow<const int16_t>(
                    audioArr,
                    audioArrSize,
                    audioParamsWinLen,
                    audioParamsWinStride);

            /* Declare a container for results. */
            std::vector<arm::app::asr::AsrResult> results;

            /* Display message on the LCD - inference running. */
            std::string str_inf{"Running inference... "};
            platform.data_psn->present_data_text(
                    str_inf.c_str(), str_inf.size(),
                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);

            info("Running inference on audio clip %" PRIu32 " => %s\n", currentIndex,
                 get_filename(currentIndex));

            size_t inferenceWindowLen = audioParamsWinLen;

            /* Start sliding through audio clip. */
            while (audioDataSlider.HasNext()) {

                /* If there is not enough audio left, see how much can be sent for processing. */
                size_t nextStartIndex = audioDataSlider.NextWindowStartIndex();
                if (nextStartIndex + audioParamsWinLen > audioArrSize) {
                    inferenceWindowLen = audioArrSize - nextStartIndex;
                }

                const int16_t* inferenceWindow = audioDataSlider.Next();

                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
                     static_cast<size_t>(ceilf(audioDataSlider.FractionalTotalStrides() + 1)));

                /* Calculate MFCCs, deltas and populate the input tensor. */
                prep.Invoke(inferenceWindow, inferenceWindowLen, inputTensor);

                /* Run inference over this audio clip sliding window. */
                if (!RunInference(model, profiler)) {
                    return false;
                }

                /* Post-process; the final argument marks whether this is the last window of the clip. */
                postp.Invoke(outputTensor, reductionAxis, !audioDataSlider.HasNext());

                /* Get results. */
                std::vector<ClassificationResult> classificationResult;
                auto& classifier = ctx.Get<AsrClassifier&>("classifier");
                classifier.GetClassificationResults(
                        outputTensor, classificationResult,
                        ctx.Get<std::vector<std::string>&>("labels"), 1);

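                /* Tag each result with the window's start time in seconds:
                 * window index * stride (in samples) * seconds per sample. */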
                results.emplace_back(asr::AsrResult(classificationResult,
                                                    (audioDataSlider.Index() *
                                                     audioParamsSecondsPerSample *
                                                     audioParamsWinStride),
                                                    audioDataSlider.Index(), scoreThreshold));

#if VERIFY_TEST_OUTPUT
                arm::app::DumpTensor(outputTensor,
                    outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx]);
#endif /* VERIFY_TEST_OUTPUT */

            }

            /* Erase the "Running inference..." message from the LCD. */
            str_inf = std::string(str_inf.size(), ' ');
            platform.data_psn->present_data_text(
                    str_inf.c_str(), str_inf.size(),
                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);

            ctx.Set<std::vector<arm::app::asr::AsrResult>>("results", results);

            if (!PresentInferenceResult(platform, results)) {
                return false;
            }

            profiler.PrintProfilingResult();

            IncrementAppCtxClipIdx(ctx);

        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);

        return true;
    }

    static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
    {
        auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");

        if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
            ctx.Set<uint32_t>("clipIndex", 0);
            return;
        }
        ++curAudioIdx;
        ctx.Set<uint32_t>("clipIndex", curAudioIdx);
    }

    static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx)
    {
        if (idx >= NUMBER_OF_FILES) {
            printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
                       idx, NUMBER_OF_FILES);
            return false;
        }

        ctx.Set<uint32_t>("clipIndex", idx);
        return true;
    }

    static bool PresentInferenceResult(hal_platform& platform,
                                       const std::vector<arm::app::asr::AsrResult>& results)
    {
        constexpr uint32_t dataPsnTxtStartX1 = 20;
        constexpr uint32_t dataPsnTxtStartY1 = 60;
        constexpr bool allow_multiple_lines = true;

        platform.data_psn->set_text_color(COLOR_GREEN);

        info("Final results:\n");
        info("Total number of inferences: %zu\n", results.size());
        /* Results from multiple inferences should be combined before processing. */
        std::vector<arm::app::ClassificationResult> combinedResults;
        for (auto& result : results) {
            combinedResults.insert(combinedResults.end(),
                                   result.m_resultVec.begin(),
                                   result.m_resultVec.end());
        }

        /* Get each inference result string using the decoder. */
        for (const auto& result : results) {
            std::string infResultStr = audio::asr::DecodeOutput(result.m_resultVec);

            info("For timestamp: %f (inference #: %" PRIu32 "); label: %s\n",
                 result.m_timeStamp, result.m_inferenceNumber,
                 infResultStr.c_str());
        }

        /* Get the decoded result for the combined result. */
        std::string finalResultStr = audio::asr::DecodeOutput(combinedResults);

        platform.data_psn->present_data_text(
                finalResultStr.c_str(), finalResultStr.size(),
                dataPsnTxtStartX1, dataPsnTxtStartY1,
                allow_multiple_lines);

        info("Complete recognition: %s\n", finalResultStr.c_str());
        return true;
    }

} /* namespace app */
} /* namespace arm */