blob: e706eb8b297b1209af1113f19e205744d4734e87 [file] [log] [blame]
/*
 * Copyright (c) 2021 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17#include "UseCaseHandler.hpp"
18
19#include "InputFiles.hpp"
20#include "AsrClassifier.hpp"
21#include "Wav2LetterModel.hpp"
22#include "hal.h"
23#include "Wav2LetterMfcc.hpp"
24#include "AudioUtils.hpp"
25#include "UseCaseCommonUtils.hpp"
26#include "AsrResult.hpp"
27#include "Wav2LetterPreprocess.hpp"
28#include "Wav2LetterPostprocess.hpp"
29#include "OutputDecode.hpp"
30
31namespace arm {
32namespace app {
33
    /**
     * @brief           Helper function to increment current audio clip index
     *                  held in the application context, wrapping to the first
     *                  clip after the last one.
     * @param[in,out]   ctx   Pointer to the application context object.
     **/
    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx);

    /**
     * @brief           Helper function to set the audio clip index.
     * @param[in,out]   ctx   Pointer to the application context object.
     * @param[in]       idx   Value to be set; must be less than
     *                        NUMBER_OF_FILES.
     * @return          true if index is set, false otherwise.
     **/
    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);

    /**
     * @brief           Presents ASR inference results using the data
     *                  presentation object.
     * @param[in]       platform    Reference to the hal platform object.
     * @param[in]       results     Vector of ASR classification results to be
     *                              displayed.
     * @return          true if successful, false otherwise.
     **/
    static bool _PresentInferenceResult(
                    hal_platform& platform,
                    const std::vector<arm::app::asr::AsrResult>& results);
    /* Audio inference classification handler.
     *
     * Runs the full ASR pipeline (MFCC pre-processing -> inference ->
     * post-processing -> decoding) over one audio clip, or over every clip
     * when runAll is set. Returns true on success, false on any failure.
     */
    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
    {
        /* LCD coordinates for the "Running inference..." status message. */
        constexpr uint32_t dataPsnTxtInfStartX = 20;
        constexpr uint32_t dataPsnTxtInfStartY = 40;

        auto& platform = ctx.Get<hal_platform&>("platform");
        platform.data_psn->clear(COLOR_BLACK);

        /* If the request has a valid size, set the audio index.
         * NOTE(review): an out-of-range clipIndex is silently ignored here -
         * the previously stored "clipIndex" context value is used instead. */
        if (clipIndex < NUMBER_OF_FILES) {
            if (!_SetAppCtxClipIdx(ctx, clipIndex)) {
                return false;
            }
        }

        /* Get model reference. */
        auto& model = ctx.Get<Model&>("model");
        if (!model.IsInited()) {
            printf_err("Model is not initialised! Terminating processing.\n");
            return false;
        }

        /* Get score threshold to be applied for the classifier (post-inference). */
        auto scoreThreshold = ctx.Get<float>("scoreThreshold");

        /* Get tensors. Dimensions of the tensor should have been verified by
         * the callee. */
        TfLiteTensor* inputTensor = model.GetInputTensor(0);
        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
        const uint32_t inputRows = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];

        /* Populate MFCC related parameters (window length/stride are in audio
         * samples - presumably; confirm against the "frameLength" producer). */
        auto mfccParamsWinLen = ctx.Get<uint32_t>("frameLength");
        auto mfccParamsWinStride = ctx.Get<uint32_t>("frameStride");

        /* Populate ASR inference context and inner lengths for input: of the
         * inputRows feature vectors, ctxLen rows on each side act as context,
         * leaving inputInnerLen "new" rows per inference. */
        auto inputCtxLen = ctx.Get<uint32_t>("ctxLen");
        const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);

        /* Audio data stride corresponds to inputInnerLen feature vectors, so
         * consecutive audio windows overlap by the context sections. */
        const uint32_t audioParamsWinLen = (inputRows - 1) * mfccParamsWinStride + (mfccParamsWinLen);
        const uint32_t audioParamsWinStride = inputInnerLen * mfccParamsWinStride;
        const float audioParamsSecondsPerSample = (1.0/audio::Wav2LetterMFCC::ms_defaultSamplingFreq);

        /* Get pre/post-processing objects. */
        auto& prep = ctx.Get<audio::asr::Preprocess&>("preprocess");
        auto& postp = ctx.Get<audio::asr::Postprocess&>("postprocess");

        /* Set default reduction axis for post-processing. */
        const uint32_t reductionAxis = arm::app::Wav2LetterModel::ms_outputRowsIdx;

        /* Audio clip start index - remembered so the runAll loop stops after
         * wrapping back around to where it started. */
        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");

        /* Loop to process audio clips. */
        do {
            /* Get current audio clip index. */
            auto currentIndex = ctx.Get<uint32_t>("clipIndex");

            /* Get the current audio buffer and respective size. */
            const int16_t* audioArr = get_audio_array(currentIndex);
            const uint32_t audioArrSize = get_audio_array_size(currentIndex);

            if (!audioArr) {
                printf_err("Invalid audio array pointer\n");
                return false;
            }

            /* Audio clip must have enough samples to produce 1 MFCC feature. */
            if (audioArrSize < mfccParamsWinLen) {
                printf_err("Not enough audio samples, minimum needed is %u\n", mfccParamsWinLen);
                return false;
            }

            /* Initialise an audio slider. */
            auto audioDataSlider = audio::ASRSlidingWindow<const int16_t>(
                                        audioArr,
                                        audioArrSize,
                                        audioParamsWinLen,
                                        audioParamsWinStride);

            /* Declare a container for results. */
            std::vector<arm::app::asr::AsrResult> results;

            /* Display message on the LCD - inference running. */
            std::string str_inf{"Running inference... "};
            platform.data_psn->present_data_text(
                                str_inf.c_str(), str_inf.size(),
                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);

            info("Running inference on audio clip %u => %s\n", currentIndex,
                 get_filename(currentIndex));

            size_t inferenceWindowLen = audioParamsWinLen;

            /* Start sliding through audio clip. */
            while (audioDataSlider.HasNext()) {

                /* If not enough audio is left for a full window, see how much
                 * can be sent for processing (final window may be shorter). */
                size_t nextStartIndex = audioDataSlider.NextWindowStartIndex();
                if (nextStartIndex + audioParamsWinLen > audioArrSize) {
                    inferenceWindowLen = audioArrSize - nextStartIndex;
                }

                const int16_t* inferenceWindow = audioDataSlider.Next();

                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
                     static_cast<size_t>(ceilf(audioDataSlider.FractionalTotalStrides() + 1)));

                /* Time the pre-processing stage only. */
                Profiler prepProfiler{&platform, "pre-processing"};
                prepProfiler.StartProfiling();

                /* Calculate MFCCs, deltas and populate the input tensor. */
                prep.Invoke(inferenceWindow, inferenceWindowLen, inputTensor);

                prepProfiler.StopProfiling();
                std::string prepProfileResults = prepProfiler.GetResultsAndReset();
                info("%s\n", prepProfileResults.c_str());

                /* Run inference over this audio clip sliding window. */
                arm::app::RunInference(platform, model);

                /* Post-process; the last argument marks whether this is the
                 * final window of the clip. */
                postp.Invoke(outputTensor, reductionAxis, !audioDataSlider.HasNext());

                /* Get results. */
                std::vector<ClassificationResult> classificationResult;
                auto& classifier = ctx.Get<AsrClassifier&>("classifier");
                classifier.GetClassificationResults(
                            outputTensor, classificationResult,
                            ctx.Get<std::vector<std::string>&>("labels"), 1);

                /* Record the result, tagged with this window's start time (in
                 * seconds) within the clip and its inference index. */
                results.emplace_back(asr::AsrResult(classificationResult,
                                                    (audioDataSlider.Index() *
                                                    audioParamsSecondsPerSample *
                                                    audioParamsWinStride),
                                                    audioDataSlider.Index(), scoreThreshold));

#if VERIFY_TEST_OUTPUT
                arm::app::DumpTensor(outputTensor,
                    outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx]);
#endif /* VERIFY_TEST_OUTPUT */

            }

            /* Erase the status message by overwriting it with spaces. */
            str_inf = std::string(str_inf.size(), ' ');
            platform.data_psn->present_data_text(
                                str_inf.c_str(), str_inf.size(),
                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);

            /* Publish results for any downstream consumer of the context. */
            ctx.Set<std::vector<arm::app::asr::AsrResult>>("results", results);

            if (!_PresentInferenceResult(platform, results)) {
                return false;
            }

            /* Advance to the next clip (wraps to 0 after the last file). */
            _IncrementAppCtxClipIdx(ctx);

        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);

        return true;
    }
225
226 static void _IncrementAppCtxClipIdx(ApplicationContext& ctx)
227 {
228 auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
229
230 if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
231 ctx.Set<uint32_t>("clipIndex", 0);
232 return;
233 }
234 ++curAudioIdx;
235 ctx.Set<uint32_t>("clipIndex", curAudioIdx);
236 }
237
238 static bool _SetAppCtxClipIdx(ApplicationContext& ctx, const uint32_t idx)
239 {
240 if (idx >= NUMBER_OF_FILES) {
241 printf_err("Invalid idx %u (expected less than %u)\n",
242 idx, NUMBER_OF_FILES);
243 return false;
244 }
245
246 ctx.Set<uint32_t>("clipIndex", idx);
247 return true;
248 }
249
250 static bool _PresentInferenceResult(hal_platform& platform,
251 const std::vector<arm::app::asr::AsrResult>& results)
252 {
253 constexpr uint32_t dataPsnTxtStartX1 = 20;
254 constexpr uint32_t dataPsnTxtStartY1 = 60;
255 constexpr bool allow_multiple_lines = true;
256
257 platform.data_psn->set_text_color(COLOR_GREEN);
258
259 /* Results from multiple inferences should be combined before processing. */
260 std::vector<arm::app::ClassificationResult> combinedResults;
261 for (auto& result : results) {
262 combinedResults.insert(combinedResults.end(),
263 result.m_resultVec.begin(),
264 result.m_resultVec.end());
265 }
266
267 /* Get each inference result string using the decoder. */
268 for (const auto & result : results) {
269 std::string infResultStr = audio::asr::DecodeOutput(result.m_resultVec);
270
271 info("Result for inf %u: %s\n", result.m_inferenceNumber,
272 infResultStr.c_str());
273 }
274
275 /* Get the decoded result for the combined result. */
276 std::string finalResultStr = audio::asr::DecodeOutput(combinedResults);
277
278 platform.data_psn->present_data_text(
279 finalResultStr.c_str(), finalResultStr.size(),
280 dataPsnTxtStartX1, dataPsnTxtStartY1,
281 allow_multiple_lines);
282
283 info("Final result: %s\n", finalResultStr.c_str());
284 return true;
285 }
286
287} /* namespace app */
288} /* namespace arm */