/*
 * Copyright (c) 2022 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "KwsProcessing.hpp"
#include "ImageUtils.hpp"
#include "log_macros.h"
#include "MicroNetKwsModel.hpp"

#include <cstring>    /* For std::memcpy. */

namespace arm {
namespace app {

    KwsPreProcess::KwsPreProcess(TfLiteTensor* inputTensor, size_t numFeatures, size_t numMfccFrames,
                                 int mfccFrameLength, int mfccFrameStride):
        m_inputTensor{inputTensor},
        m_mfccFrameLength{mfccFrameLength},
        m_mfccFrameStride{mfccFrameStride},
        m_numMfccFrames{numMfccFrames},
        m_mfcc{audio::MicroNetKwsMFCC(numFeatures, mfccFrameLength)}
    {
        this->m_mfcc.Init();

        /* Deduce the data length required for 1 inference from the network parameters. */
        this->m_audioDataWindowSize = this->m_numMfccFrames * this->m_mfccFrameStride +
                (this->m_mfccFrameLength - this->m_mfccFrameStride);

        /* Create an MFCC feature sliding window over the data required for 1 inference. */
        this->m_mfccSlidingWindow = audio::SlidingWindow<const int16_t>(nullptr, this->m_audioDataWindowSize,
                this->m_mfccFrameLength, this->m_mfccFrameStride);

        /* For longer audio clips we choose to move by half the audio window size
         * => for a 1 second window size there is an overlap of 0.5 seconds. */
        this->m_audioDataStride = this->m_audioDataWindowSize / 2;

        /* To make the previously calculated features re-usable, the audio stride must be a
         * multiple of the MFCC frame stride. Reduce the stride through the audio if needed. */
        if (0 != this->m_audioDataStride % this->m_mfccFrameStride) {
            this->m_audioDataStride -= this->m_audioDataStride % this->m_mfccFrameStride;
        }

        this->m_numMfccVectorsInAudioStride = this->m_audioDataStride / this->m_mfccFrameStride;

        /* Calculate the number of feature vectors in the window overlap region.
         * These feature vectors will be re-used. */
        this->m_numReusedMfccVectors = this->m_mfccSlidingWindow.TotalStrides() + 1
                - this->m_numMfccVectorsInAudioStride;
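
        /* Example (a sketch assuming the typical MicroNetKws configuration of 16 kHz audio,
         * mfccFrameLength = 640, mfccFrameStride = 320 and numMfccFrames = 49):
         * window size = 49 * 320 + (640 - 320) = 16000 samples (1 second of audio),
         * audio stride = 16000 / 2 = 8000 samples, giving 8000 / 320 = 25 new MFCC
         * vectors per stride and 49 - 25 = 24 vectors re-used from the previous window. */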

        /* Construct the feature calculation function. */
        this->m_mfccFeatureCalculator = GetFeatureCalculator(this->m_mfcc, this->m_inputTensor,
                                                             this->m_numReusedMfccVectors);

        if (!this->m_mfccFeatureCalculator) {
            printf_err("Feature calculator not initialized.");
        }
    }

    bool KwsPreProcess::DoPreProcess(const void* data, size_t inputSize)
    {
        UNUSED(inputSize);
        if (data == nullptr) {
            printf_err("Data pointer is null\n");
            return false;
        }

        /* Set the features sliding window to the new address. */
        auto input = static_cast<const int16_t*>(data);
        this->m_mfccSlidingWindow.Reset(input);

        /* The cache is only usable if we have more than 1 inference in an audio clip. */
        bool useCache = this->m_audioWindowIndex > 0 && this->m_numReusedMfccVectors > 0;

        /* Use a sliding window to calculate MFCC features frame by frame. */
        while (this->m_mfccSlidingWindow.HasNext()) {
            const int16_t* mfccWindow = this->m_mfccSlidingWindow.Next();

            std::vector<int16_t> mfccFrameAudioData = std::vector<int16_t>(mfccWindow,
                    mfccWindow + this->m_mfccFrameLength);

            /* Compute features for this window and write them to the input tensor. */
            this->m_mfccFeatureCalculator(mfccFrameAudioData, this->m_mfccSlidingWindow.Index(),
                                          useCache, this->m_numMfccVectorsInAudioStride);
        }

        debug("Input tensor populated\n");

        return true;
    }
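
    /* Typical driving loop (a sketch of how the KWS use-case handler is expected to call
     * this class; identifiers outside this file are assumptions):
     *
     *     auto audioSlider = audio::SlidingWindow<const int16_t>(
     *         clip, clipLength, preProcess.m_audioDataWindowSize, preProcess.m_audioDataStride);
     *     while (audioSlider.HasNext()) {
     *         const int16_t* window = audioSlider.Next();
     *         preProcess.m_audioWindowIndex = audioSlider.Index();   // enables MFCC cache re-use
     *         preProcess.DoPreProcess(window, preProcess.m_audioDataWindowSize);
     *         // ... run inference, then KwsPostProcess::DoPostProcess() ...
     *     }
     */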

    /**
     * @brief Generic feature calculator factory.
     *
     * Returns a lambda function that computes features, backed by a feature cache.
     * The actual feature computation is done by the lambda function supplied as a
     * parameter. Features are written directly into the input tensor memory.
     *
     * @tparam T                Feature vector element type.
     * @param[in] inputTensor   Model input tensor pointer.
     * @param[in] cacheSize     Number of feature vectors to cache, defined by the sliding window overlap.
     * @param[in] compute       Feature calculation function.
     * @return Lambda function to compute features.
     */
    template<class T>
    std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
    KwsPreProcess::FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
                               std::function<std::vector<T> (std::vector<int16_t>&)> compute)
    {
        /* Feature cache to be captured by the lambda function. */
        static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);
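        /* Note: as a function-local static, there is one cache per template instantiation
         * (one for int8_t, one for float); it is sized on the first call and shared by
         * all subsequent calls to this function. */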

        return [=](std::vector<int16_t>& audioDataWindow,
                   size_t index,
                   bool useCache,
                   size_t featuresOverlapIndex)
        {
            T* tensorData = tflite::GetTensorData<T>(inputTensor);
            std::vector<T> features;

            /* Re-use features from the cache if the cache is ready and the sliding windows overlap.
             * The overlap is at the beginning of the sliding window and has the size of the feature cache. */
            if (useCache && index < featureCache.size()) {
                features = std::move(featureCache[index]);
            } else {
                features = std::move(compute(audioDataWindow));
            }
            auto size = features.size();
            auto sizeBytes = sizeof(T) * size;
            std::memcpy(tensorData + (index * size), features.data(), sizeBytes);

            /* Start renewing the cache as soon as the iteration goes outside the windows' overlap. */
            if (index >= featuresOverlapIndex) {
                featureCache[index - featuresOverlapIndex] = std::move(features);
            }
        };
    }

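    /* FeatureCalc is defined in this translation unit, so explicit instantiations for the
     * element types used by GetFeatureCalculator() (int8_t for quantised models, float
     * otherwise) are provided here for the linker. */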
    template std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
    KwsPreProcess::FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
                                       size_t cacheSize,
                                       std::function<std::vector<int8_t> (std::vector<int16_t>&)> compute);

    template std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
    KwsPreProcess::FeatureCalc<float>(TfLiteTensor* inputTensor,
                                      size_t cacheSize,
                                      std::function<std::vector<float> (std::vector<int16_t>&)> compute);

    std::function<void (std::vector<int16_t>&, int, bool, size_t)>
    KwsPreProcess::GetFeatureCalculator(audio::MicroNetKwsMFCC& mfcc, TfLiteTensor* inputTensor, size_t cacheSize)
    {
        std::function<void (std::vector<int16_t>&, size_t, bool, size_t)> mfccFeatureCalc;

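        /* Select the feature calculator based on the input tensor type: quantised (int8)
         * models need the MFCC output mapped into the quantised domain using the tensor's
         * scale and zero point, while float models consume the MFCC output directly. */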
        TfLiteQuantization quant = inputTensor->quantization;

        if (kTfLiteAffineQuantization == quant.type) {
            auto* quantParams = (TfLiteAffineQuantization*) quant.params;
            const float quantScale = quantParams->scale->data[0];
            const int quantOffset = quantParams->zero_point->data[0];

            switch (inputTensor->type) {
                case kTfLiteInt8: {
                    mfccFeatureCalc = this->FeatureCalc<int8_t>(inputTensor,
                            cacheSize,
                            [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
                                return mfcc.MfccComputeQuant<int8_t>(audioDataWindow,
                                                                     quantScale,
                                                                     quantOffset);
                            }
                    );
                    break;
                }
                default:
                    printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
            }
        } else {
            mfccFeatureCalc = this->FeatureCalc<float>(inputTensor, cacheSize,
                    [&mfcc](std::vector<int16_t>& audioDataWindow) {
                        return mfcc.MfccCompute(audioDataWindow);
                    }
            );
        }
        return mfccFeatureCalc;
    }

    KwsPostProcess::KwsPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
                                   const std::vector<std::string>& labels,
                                   std::vector<ClassificationResult>& results)
        : m_outputTensor{outputTensor},
          m_kwsClassifier{classifier},
          m_labels{labels},
          m_results{results}
    {}

    bool KwsPostProcess::DoPostProcess()
    {
        return this->m_kwsClassifier.GetClassificationResults(
                this->m_outputTensor, this->m_results,
                this->m_labels, 1, true);
    }

} /* namespace app */
} /* namespace arm */