blob: 43b17dc5d7090623142e96800904e96bd97972eb [file] [log] [blame]
alexander3c798932021-03-26 21:42:19 +00001/*
2 * Copyright (c) 2021 Arm Limited. All rights reserved.
3 * SPDX-License-Identifier: Apache-2.0
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17#include "UseCaseHandler.hpp"
18
19#include "InputFiles.hpp"
20#include "AsrClassifier.hpp"
21#include "Wav2LetterModel.hpp"
22#include "hal.h"
23#include "Wav2LetterMfcc.hpp"
24#include "AudioUtils.hpp"
25#include "UseCaseCommonUtils.hpp"
26#include "AsrResult.hpp"
27#include "Wav2LetterPreprocess.hpp"
28#include "Wav2LetterPostprocess.hpp"
29#include "OutputDecode.hpp"
30
31namespace arm {
32namespace app {
33
    /**
     * @brief           Helper function to increment current audio clip index.
     *                  Wraps back to clip 0 after the last file.
     * @param[in,out]   ctx   Reference to the application context object.
     **/
    static void IncrementAppCtxClipIdx(ApplicationContext& ctx);

    /**
     * @brief           Helper function to set the audio clip index.
     * @param[in,out]   ctx   Reference to the application context object.
     * @param[in]       idx   Value to be set; must be less than NUMBER_OF_FILES.
     * @return          true if index is set, false otherwise.
     **/
    static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);

    /**
     * @brief           Presents inference results using the data presentation
     *                  object.
     * @param[in]       platform   Reference to the hal platform object.
     * @param[in]       results    Vector of classification results to be
     *                             displayed.
     * @return          true if successful, false otherwise.
     **/
    static bool PresentInferenceResult(
                    hal_platform& platform,
                    const std::vector<arm::app::asr::AsrResult>& results);
60
    /**
     * @brief   Audio inference classification handler.
     *
     * Runs the full ASR pipeline over one audio clip (or all clips when
     * runAll is set): slides a window over the raw audio, extracts MFCC
     * features (pre-process), runs inference, decodes/post-processes the
     * output tensor, and presents the accumulated results on the LCD.
     *
     * @param[in,out]   ctx        Application context; must already hold
     *                             "platform", "profiler", "model",
     *                             "scoreThreshold", "frameLength",
     *                             "frameStride", "ctxLen", "preprocess",
     *                             "postprocess", "classifier", "labels"
     *                             and "clipIndex" entries.
     * @param[in]       clipIndex  Requested clip index; values >=
     *                             NUMBER_OF_FILES leave the current
     *                             context index unchanged.
     * @param[in]       runAll     If true, loop over every clip until the
     *                             index wraps back to the starting clip.
     * @return          true on success, false on any failure
     *                  (uninitialised model, bad clip data, inference or
     *                  context-set errors).
     **/
    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
    {
        /* LCD coordinates for the "Running inference..." status text. */
        constexpr uint32_t dataPsnTxtInfStartX = 20;
        constexpr uint32_t dataPsnTxtInfStartY = 40;

        auto& platform = ctx.Get<hal_platform&>("platform");
        platform.data_psn->clear(COLOR_BLACK);

        auto& profiler = ctx.Get<Profiler&>("profiler");

        /* If the request has a valid size, set the audio index. */
        if (clipIndex < NUMBER_OF_FILES) {
            if (!SetAppCtxClipIdx(ctx, clipIndex)) {
                return false;
            }
        }

        /* Get model reference. */
        auto& model = ctx.Get<Model&>("model");
        if (!model.IsInited()) {
            printf_err("Model is not initialised! Terminating processing.\n");
            return false;
        }

        /* Get score threshold to be applied for the classifier (post-inference). */
        auto scoreThreshold = ctx.Get<float>("scoreThreshold");

        /* Get tensors. Dimensions of the tensor should have been verified by
         * the callee. */
        TfLiteTensor* inputTensor = model.GetInputTensor(0);
        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
        const uint32_t inputRows = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];

        /* Populate MFCC related parameters. */
        auto mfccParamsWinLen = ctx.Get<uint32_t>("frameLength");
        auto mfccParamsWinStride = ctx.Get<uint32_t>("frameStride");

        /* Populate ASR inference context and inner lengths for input.
         * The context (ctxLen) frames at each edge of the input overlap
         * between consecutive windows; only the inner frames advance. */
        auto inputCtxLen = ctx.Get<uint32_t>("ctxLen");
        const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);

        /* Audio data stride corresponds to inputInnerLen feature vectors.
         * Window length in raw samples = frames * frame stride + one frame
         * length for the final frame. */
        const uint32_t audioParamsWinLen = (inputRows - 1) * mfccParamsWinStride + (mfccParamsWinLen);
        const uint32_t audioParamsWinStride = inputInnerLen * mfccParamsWinStride;
        const float audioParamsSecondsPerSample = (1.0/audio::Wav2LetterMFCC::ms_defaultSamplingFreq);

        /* Get pre/post-processing objects. */
        auto& prep = ctx.Get<audio::asr::Preprocess&>("preprocess");
        auto& postp = ctx.Get<audio::asr::Postprocess&>("postprocess");

        /* Set default reduction axis for post-processing. */
        const uint32_t reductionAxis = arm::app::Wav2LetterModel::ms_outputRowsIdx;

        /* Audio clip start index - remembered so the runAll loop stops
         * after a full wrap-around. */
        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");

        /* Loop to process audio clips. */
        do {
            /* Get current audio clip index. */
            auto currentIndex = ctx.Get<uint32_t>("clipIndex");

            /* Get the current audio buffer and respective size. */
            const int16_t* audioArr = get_audio_array(currentIndex);
            const uint32_t audioArrSize = get_audio_array_size(currentIndex);

            if (!audioArr) {
                printf_err("Invalid audio array pointer\n");
                return false;
            }

            /* Audio clip must have enough samples to produce 1 MFCC feature. */
            if (audioArrSize < mfccParamsWinLen) {
                printf_err("Not enough audio samples, minimum needed is %" PRIu32 "\n",
                    mfccParamsWinLen);
                return false;
            }

            /* Initialise an audio slider. */
            auto audioDataSlider = audio::ASRSlidingWindow<const int16_t>(
                    audioArr,
                    audioArrSize,
                    audioParamsWinLen,
                    audioParamsWinStride);

            /* Declare a container for results. */
            std::vector<arm::app::asr::AsrResult> results;

            /* Display message on the LCD - inference running. */
            std::string str_inf{"Running inference... "};
            platform.data_psn->present_data_text(
                                str_inf.c_str(), str_inf.size(),
                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);

            info("Running inference on audio clip %" PRIu32 " => %s\n", currentIndex,
                 get_filename(currentIndex));

            size_t inferenceWindowLen = audioParamsWinLen;

            /* Start sliding through audio clip. */
            while (audioDataSlider.HasNext()) {

                /* If not enough audio see how much can be sent for processing.
                 * The final (partial) window is truncated to the remaining
                 * samples; the pre-processor is expected to handle this. */
                size_t nextStartIndex = audioDataSlider.NextWindowStartIndex();
                if (nextStartIndex + audioParamsWinLen > audioArrSize) {
                    inferenceWindowLen = audioArrSize - nextStartIndex;
                }

                const int16_t* inferenceWindow = audioDataSlider.Next();

                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
                     static_cast<size_t>(ceilf(audioDataSlider.FractionalTotalStrides() + 1)));

                /* Calculate MFCCs, deltas and populate the input tensor. */
                prep.Invoke(inferenceWindow, inferenceWindowLen, inputTensor);

                /* Run inference over this audio clip sliding window. */
                if (!RunInference(model, profiler)) {
                    return false;
                }

                /* Post-process. The last-window flag lets the post-processor
                 * treat the trailing context frames differently. */
                postp.Invoke(outputTensor, reductionAxis, !audioDataSlider.HasNext());

                /* Get results. */
                std::vector<ClassificationResult> classificationResult;
                auto& classifier = ctx.Get<AsrClassifier&>("classifier");
                classifier.GetClassificationResults(
                            outputTensor, classificationResult,
                            ctx.Get<std::vector<std::string>&>("labels"), 1);

                /* Timestamp = window index * stride expressed in seconds. */
                results.emplace_back(asr::AsrResult(classificationResult,
                                                    (audioDataSlider.Index() *
                                                    audioParamsSecondsPerSample *
                                                    audioParamsWinStride),
                                                    audioDataSlider.Index(), scoreThreshold));

#if VERIFY_TEST_OUTPUT
                arm::app::DumpTensor(outputTensor,
                    outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx]);
#endif /* VERIFY_TEST_OUTPUT */

            }

            /* Erase the status text by overwriting it with spaces. */
            str_inf = std::string(str_inf.size(), ' ');
            platform.data_psn->present_data_text(
                            str_inf.c_str(), str_inf.size(),
                            dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);

            ctx.Set<std::vector<arm::app::asr::AsrResult>>("results", results);

            if (!PresentInferenceResult(platform, results)) {
                return false;
            }

            profiler.PrintProfilingResult();

            IncrementAppCtxClipIdx(ctx);

        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);

        return true;
    }
225
alexanderc350cdc2021-04-29 20:36:09 +0100226 static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
alexander3c798932021-03-26 21:42:19 +0000227 {
228 auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
229
230 if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
231 ctx.Set<uint32_t>("clipIndex", 0);
232 return;
233 }
234 ++curAudioIdx;
235 ctx.Set<uint32_t>("clipIndex", curAudioIdx);
236 }
237
alexanderc350cdc2021-04-29 20:36:09 +0100238 static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx)
alexander3c798932021-03-26 21:42:19 +0000239 {
240 if (idx >= NUMBER_OF_FILES) {
Kshitij Sisodiaf9c19ea2021-05-07 16:08:14 +0100241 printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
alexander3c798932021-03-26 21:42:19 +0000242 idx, NUMBER_OF_FILES);
243 return false;
244 }
245
246 ctx.Set<uint32_t>("clipIndex", idx);
247 return true;
248 }
249
alexanderc350cdc2021-04-29 20:36:09 +0100250 static bool PresentInferenceResult(hal_platform& platform,
251 const std::vector<arm::app::asr::AsrResult>& results)
alexander3c798932021-03-26 21:42:19 +0000252 {
253 constexpr uint32_t dataPsnTxtStartX1 = 20;
254 constexpr uint32_t dataPsnTxtStartY1 = 60;
255 constexpr bool allow_multiple_lines = true;
256
257 platform.data_psn->set_text_color(COLOR_GREEN);
258
Isabella Gottardi8df12f32021-04-07 17:15:31 +0100259 info("Final results:\n");
260 info("Total number of inferences: %zu\n", results.size());
alexander3c798932021-03-26 21:42:19 +0000261 /* Results from multiple inferences should be combined before processing. */
262 std::vector<arm::app::ClassificationResult> combinedResults;
263 for (auto& result : results) {
264 combinedResults.insert(combinedResults.end(),
265 result.m_resultVec.begin(),
266 result.m_resultVec.end());
267 }
268
269 /* Get each inference result string using the decoder. */
270 for (const auto & result : results) {
271 std::string infResultStr = audio::asr::DecodeOutput(result.m_resultVec);
272
Kshitij Sisodiaf9c19ea2021-05-07 16:08:14 +0100273 info("For timestamp: %f (inference #: %" PRIu32 "); label: %s\n",
Isabella Gottardi8df12f32021-04-07 17:15:31 +0100274 result.m_timeStamp, result.m_inferenceNumber,
275 infResultStr.c_str());
alexander3c798932021-03-26 21:42:19 +0000276 }
277
278 /* Get the decoded result for the combined result. */
279 std::string finalResultStr = audio::asr::DecodeOutput(combinedResults);
280
281 platform.data_psn->present_data_text(
282 finalResultStr.c_str(), finalResultStr.size(),
283 dataPsnTxtStartX1, dataPsnTxtStartY1,
284 allow_multiple_lines);
285
Isabella Gottardi8df12f32021-04-07 17:15:31 +0100286 info("Complete recognition: %s\n", finalResultStr.c_str());
alexander3c798932021-03-26 21:42:19 +0000287 return true;
288 }
289
290} /* namespace app */
291} /* namespace arm */