Richard Burton | 0055346 | 2021-11-10 16:27:14 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2021 Arm Limited. All rights reserved. |
| 3 | * SPDX-License-Identifier: Apache-2.0 |
| 4 | * |
| 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
| 8 | * |
| 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | * |
| 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
| 16 | */ |
| 17 | #include <cmath> |
| 18 | #include <algorithm> |
| 19 | |
| 20 | #include "UseCaseHandler.hpp" |
| 21 | #include "hal.h" |
| 22 | #include "UseCaseCommonUtils.hpp" |
| 23 | #include "AudioUtils.hpp" |
| 24 | #include "InputFiles.hpp" |
| 25 | #include "RNNoiseModel.hpp" |
| 26 | #include "RNNoiseProcess.hpp" |
| 27 | |
| 28 | namespace arm { |
| 29 | namespace app { |
| 30 | |
| 31 | /** |
| 32 | * @brief Helper function to increment current audio clip features index. |
| 33 | * @param[in,out] ctx Pointer to the application context object. |
| 34 | **/ |
| 35 | static void IncrementAppCtxClipIdx(ApplicationContext& ctx); |
| 36 | |
| 37 | /** |
| 38 | * @brief Quantize the given features and populate the input Tensor. |
| 39 | * @param[in] inputFeatures Vector of floating point features to quantize. |
| 40 | * @param[in] quantScale Quantization scale for the inputTensor. |
| 41 | * @param[in] quantOffset Quantization offset for the inputTensor. |
| 42 | * @param[in,out] inputTensor TFLite micro tensor to populate. |
| 43 | **/ |
| 44 | static void QuantizeAndPopulateInput(rnn::vec1D32F& inputFeatures, |
| 45 | float quantScale, int quantOffset, |
| 46 | TfLiteTensor* inputTensor); |
| 47 | |
| 48 | /* Noise reduction inference handler. */ |
    /**
     * @brief Noise reduction inference handler.
     *
     * Runs the RNNoise pipeline over one audio clip (or all clips when
     * runAll is true): slides a window over the input audio, extracts
     * features, runs inference (carrying GRU state between frames of the
     * same clip), post-processes the output back into denoised int16 audio
     * and, when a memory-dump region is configured in the context, writes a
     * header + denoised frames + footer into that region.
     *
     * @param[in,out] ctx    Application context; must provide "platform",
     *                       "profiler", "model", "frameLength", "frameStride",
     *                       "numInputFeatures" and "clipIndex". Optional keys:
     *                       MEM_DUMP_* (memory dump region), "features",
     *                       "featureSizes", "featureFileNames" (test hooks
     *                       overriding the baked-in audio accessors).
     * @param[in]     runAll If true, loop over every clip until the index
     *                       wraps back to the starting clip.
     * @return true on success, false on init/inference failure.
     **/
    /* Noise reduction inference handler. */
    bool NoiseReductionHandler(ApplicationContext& ctx, bool runAll)
    {
        /* LCD coordinates for the "Running inference..." status text. */
        constexpr uint32_t dataPsnTxtInfStartX = 20;
        constexpr uint32_t dataPsnTxtInfStartY = 40;

        /* Variables used for memory dumping. */
        size_t memDumpMaxLen = 0;
        uint8_t* memDumpBaseAddr = nullptr;
        size_t undefMemDumpBytesWritten = 0;
        size_t *pMemDumpBytesWritten = &undefMemDumpBytesWritten;
        if (ctx.Has("MEM_DUMP_LEN") && ctx.Has("MEM_DUMP_BASE_ADDR") && ctx.Has("MEM_DUMP_BYTE_WRITTEN")) {
            memDumpMaxLen = ctx.Get<size_t>("MEM_DUMP_LEN");
            memDumpBaseAddr = ctx.Get<uint8_t*>("MEM_DUMP_BASE_ADDR");
            pMemDumpBytesWritten = ctx.Get<size_t*>("MEM_DUMP_BYTE_WRITTEN");
        }
        /* Alias onto either the caller-supplied counter or the local dummy:
         * progress persists for the caller only when MEM_DUMP_BYTE_WRITTEN
         * is present in the context. */
        std::reference_wrapper<size_t> memDumpBytesWritten = std::ref(*pMemDumpBytesWritten);

        auto& platform = ctx.Get<hal_platform&>("platform");
        platform.data_psn->clear(COLOR_BLACK);

        auto& profiler = ctx.Get<Profiler&>("profiler");

        /* Get model reference. */
        auto& model = ctx.Get<RNNoiseModel&>("model");
        if (!model.IsInited()) {
            printf_err("Model is not initialised! Terminating processing.\n");
            return false;
        }

        /* Populate Pre-Processing related parameters. */
        auto audioParamsWinLen = ctx.Get<uint32_t>("frameLength");
        auto audioParamsWinStride = ctx.Get<uint32_t>("frameStride");
        auto nrNumInputFeatures = ctx.Get<uint32_t>("numInputFeatures");

        TfLiteTensor* inputTensor = model.GetInputTensor(0);
        /* NOTE(review): feature COUNT is compared against tensor BYTE size;
         * this only holds for a 1-byte (int8) element type — confirm if the
         * model's input type ever changes. */
        if (nrNumInputFeatures != inputTensor->bytes) {
            printf_err("Input features size must be equal to input tensor size."
                       " Feature size = %" PRIu32 ", Tensor size = %zu.\n",
                       nrNumInputFeatures, inputTensor->bytes);
            return false;
        }

        TfLiteTensor* outputTensor = model.GetOutputTensor(model.m_indexForModelOutput);

        /* Initial choice of index for WAV file. */
        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");

        /* Default accessors read the baked-in audio arrays; each may be
         * overridden through the context (used by tests). */
        std::function<const int16_t* (const uint32_t)> audioAccessorFunc = get_audio_array;
        if (ctx.Has("features")) {
            audioAccessorFunc = ctx.Get<std::function<const int16_t* (const uint32_t)>>("features");
        }
        std::function<uint32_t (const uint32_t)> audioSizeAccessorFunc = get_audio_array_size;
        if (ctx.Has("featureSizes")) {
            audioSizeAccessorFunc = ctx.Get<std::function<uint32_t (const uint32_t)>>("featureSizes");
        }
        std::function<const char*(const uint32_t)> audioFileAccessorFunc = get_filename;
        if (ctx.Has("featureFileNames")) {
            audioFileAccessorFunc = ctx.Get<std::function<const char*(const uint32_t)>>("featureFileNames");
        }
        do{
            auto startDumpAddress = memDumpBaseAddr + memDumpBytesWritten;
            auto currentIndex = ctx.Get<uint32_t>("clipIndex");

            /* Creating a sliding window through the audio. */
            auto audioDataSlider = audio::SlidingWindow<const int16_t>(
                    audioAccessorFunc(currentIndex),
                    audioSizeAccessorFunc(currentIndex), audioParamsWinLen,
                    audioParamsWinStride);

            info("Running inference on input feature map %" PRIu32 " => %s\n", currentIndex,
                 audioFileAccessorFunc(currentIndex));

            /* Header records filename and the total denoised payload size;
             * writes nothing when no dump region is configured (base addr
             * is nullptr in that case). */
            memDumpBytesWritten += DumpDenoisedAudioHeader(audioFileAccessorFunc(currentIndex),
                    (audioDataSlider.TotalStrides() + 1) * audioParamsWinLen,
                    memDumpBaseAddr + memDumpBytesWritten,
                    memDumpMaxLen - memDumpBytesWritten);

            rnn::RNNoiseProcess featureProcessor = rnn::RNNoiseProcess();
            rnn::vec1D32F audioFrame(audioParamsWinLen);
            /* NOTE(review): inputFeatures is never read below — the features
             * actually consumed come from frameFeatures.m_featuresVec. */
            rnn::vec1D32F inputFeatures(nrNumInputFeatures);
            rnn::vec1D32F denoisedAudioFrameFloat(audioParamsWinLen);
            std::vector<int16_t> denoisedAudioFrame(audioParamsWinLen);

            /* Sized in bytes; valid while the output element type is int8. */
            std::vector<float> modelOutputFloat(outputTensor->bytes);
            rnn::FrameFeatures frameFeatures;
            bool resetGRU = true;

            while (audioDataSlider.HasNext()) {
                const int16_t* inferenceWindow = audioDataSlider.Next();
                audioFrame = rnn::vec1D32F(inferenceWindow, inferenceWindow+audioParamsWinLen);

                featureProcessor.PreprocessFrame(audioFrame.data(), audioParamsWinLen, frameFeatures);

                /* Reset or copy over GRU states first to avoid TFLu memory overlap issues. */
                if (resetGRU){
                    /* First frame of a clip: start from a clean GRU state. */
                    model.ResetGruState();
                } else {
                    /* Copying gru state outputs to gru state inputs.
                     * Call ResetGruState in between the sequence of inferences on unrelated input data. */
                    model.CopyGruStates();
                }

                QuantizeAndPopulateInput(frameFeatures.m_featuresVec,
                        inputTensor->params.scale, inputTensor->params.zero_point,
                        inputTensor);

                /* Strings for presentation/logging. */
                std::string str_inf{"Running inference... "};

                /* Display message on the LCD - inference running. */
                platform.data_psn->present_data_text(
                        str_inf.c_str(), str_inf.size(),
                        dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1, audioDataSlider.TotalStrides() + 1);

                /* Run inference over this feature sliding window. */
                profiler.StartProfiling("Inference");
                bool success = model.RunInference();
                profiler.StopProfiling();
                /* From now on GRU state carries over between frames. */
                resetGRU = false;

                if (!success) {
                    return false;
                }

                /* De-quantize main model output ready for post-processing. */
                const auto* outputData = tflite::GetTensorData<int8_t>(outputTensor);
                auto outputQuantParams = arm::app::GetTensorQuantParams(outputTensor);

                for (size_t i = 0; i < outputTensor->bytes; ++i) {
                    modelOutputFloat[i] = (static_cast<float>(outputData[i]) - outputQuantParams.offset)
                                          * outputQuantParams.scale;
                }

                /* Round and cast the post-processed results for dumping to wav. */
                featureProcessor.PostProcessFrame(modelOutputFloat, frameFeatures, denoisedAudioFrameFloat);
                for (size_t i = 0; i < audioParamsWinLen; ++i) {
                    denoisedAudioFrame[i] = static_cast<int16_t>(std::roundf(denoisedAudioFrameFloat[i]));
                }

                /* Erase. */
                str_inf = std::string(str_inf.size(), ' ');
                platform.data_psn->present_data_text(
                        str_inf.c_str(), str_inf.size(),
                        dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

                if (memDumpMaxLen > 0) {
                    /* Dump output tensors to memory. */
                    memDumpBytesWritten += DumpOutputDenoisedAudioFrame(
                            denoisedAudioFrame,
                            memDumpBaseAddr + memDumpBytesWritten,
                            memDumpMaxLen - memDumpBytesWritten);
                }
            }

            if (memDumpMaxLen > 0) {
                /* Needed to not let the compiler complain about type mismatch. */
                size_t valMemDumpBytesWritten = memDumpBytesWritten;
                info("Output memory dump of %zu bytes written at address 0x%p\n",
                     valMemDumpBytesWritten, startDumpAddress);
            }

            /* Footer (EOF marker); a no-op when no dump region is configured. */
            DumpDenoisedAudioFooter(memDumpBaseAddr + memDumpBytesWritten, memDumpMaxLen - memDumpBytesWritten);

            info("Final results:\n");
            profiler.PrintProfilingResult();
            IncrementAppCtxClipIdx(ctx);

        /* Keep going until the clip index wraps back to where we started. */
        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);

        return true;
    }
| 222 | |
| 223 | size_t DumpDenoisedAudioHeader(const char* filename, size_t dumpSize, |
| 224 | uint8_t *memAddress, size_t memSize){ |
| 225 | |
| 226 | if (memAddress == nullptr){ |
| 227 | return 0; |
| 228 | } |
| 229 | |
| 230 | int32_t filenameLength = strlen(filename); |
| 231 | size_t numBytesWritten = 0; |
| 232 | size_t numBytesToWrite = 0; |
| 233 | int32_t dumpSizeByte = dumpSize * sizeof(int16_t); |
| 234 | bool overflow = false; |
| 235 | |
| 236 | /* Write the filename length */ |
| 237 | numBytesToWrite = sizeof(filenameLength); |
| 238 | if (memSize - numBytesToWrite > 0) { |
| 239 | std::memcpy(memAddress, &filenameLength, numBytesToWrite); |
| 240 | numBytesWritten += numBytesToWrite; |
| 241 | memSize -= numBytesWritten; |
| 242 | } else { |
| 243 | overflow = true; |
| 244 | } |
| 245 | |
| 246 | /* Write file name */ |
| 247 | numBytesToWrite = filenameLength; |
| 248 | if(memSize - numBytesToWrite > 0) { |
| 249 | std::memcpy(memAddress + numBytesWritten, filename, numBytesToWrite); |
| 250 | numBytesWritten += numBytesToWrite; |
| 251 | memSize -= numBytesWritten; |
| 252 | } else { |
| 253 | overflow = true; |
| 254 | } |
| 255 | |
| 256 | /* Write dumpSize in byte */ |
| 257 | numBytesToWrite = sizeof(dumpSizeByte); |
| 258 | if(memSize - numBytesToWrite > 0) { |
| 259 | std::memcpy(memAddress + numBytesWritten, &(dumpSizeByte), numBytesToWrite); |
| 260 | numBytesWritten += numBytesToWrite; |
| 261 | memSize -= numBytesWritten; |
| 262 | } else { |
| 263 | overflow = true; |
| 264 | } |
| 265 | |
| 266 | if(false == overflow) { |
| 267 | info("Audio Clip dump header info (%zu bytes) written to %p\n", numBytesWritten, memAddress); |
| 268 | } else { |
| 269 | printf_err("Not enough memory to dump Audio Clip header.\n"); |
| 270 | } |
| 271 | |
| 272 | return numBytesWritten; |
| 273 | } |
| 274 | |
| 275 | size_t DumpDenoisedAudioFooter(uint8_t *memAddress, size_t memSize){ |
| 276 | if ((memAddress == nullptr) || (memSize < 4)) { |
| 277 | return 0; |
| 278 | } |
| 279 | const int32_t eofMarker = -1; |
| 280 | std::memcpy(memAddress, &eofMarker, sizeof(int32_t)); |
| 281 | |
| 282 | return sizeof(int32_t); |
| 283 | } |
| 284 | |
| 285 | size_t DumpOutputDenoisedAudioFrame(const std::vector<int16_t> &audioFrame, |
| 286 | uint8_t *memAddress, size_t memSize) |
| 287 | { |
| 288 | if (memAddress == nullptr) { |
| 289 | return 0; |
| 290 | } |
| 291 | |
| 292 | size_t numByteToBeWritten = audioFrame.size() * sizeof(int16_t); |
| 293 | if( numByteToBeWritten > memSize) { |
George Gekov | a2b0fc2 | 2021-11-08 16:30:43 +0000 | [diff] [blame] | 294 | printf_err("Overflow error: Writing %zu of %zu bytes to memory @ 0x%p.\n", memSize, numByteToBeWritten, memAddress); |
Richard Burton | 0055346 | 2021-11-10 16:27:14 +0000 | [diff] [blame] | 295 | numByteToBeWritten = memSize; |
| 296 | } |
| 297 | |
| 298 | std::memcpy(memAddress, audioFrame.data(), numByteToBeWritten); |
| 299 | info("Copied %zu bytes to %p\n", numByteToBeWritten, memAddress); |
| 300 | |
| 301 | return numByteToBeWritten; |
| 302 | } |
| 303 | |
| 304 | size_t DumpOutputTensorsToMemory(Model& model, uint8_t* memAddress, const size_t memSize) |
| 305 | { |
| 306 | const size_t numOutputs = model.GetNumOutputs(); |
| 307 | size_t numBytesWritten = 0; |
| 308 | uint8_t* ptr = memAddress; |
| 309 | |
| 310 | /* Iterate over all output tensors. */ |
| 311 | for (size_t i = 0; i < numOutputs; ++i) { |
| 312 | const TfLiteTensor* tensor = model.GetOutputTensor(i); |
| 313 | const auto* tData = tflite::GetTensorData<uint8_t>(tensor); |
| 314 | #if VERIFY_TEST_OUTPUT |
| 315 | arm::app::DumpTensor(tensor); |
| 316 | #endif /* VERIFY_TEST_OUTPUT */ |
| 317 | /* Ensure that we don't overflow the allowed limit. */ |
| 318 | if (numBytesWritten + tensor->bytes <= memSize) { |
| 319 | if (tensor->bytes > 0) { |
| 320 | std::memcpy(ptr, tData, tensor->bytes); |
| 321 | |
| 322 | info("Copied %zu bytes for tensor %zu to 0x%p\n", |
| 323 | tensor->bytes, i, ptr); |
| 324 | |
| 325 | numBytesWritten += tensor->bytes; |
| 326 | ptr += tensor->bytes; |
| 327 | } |
| 328 | } else { |
| 329 | printf_err("Error writing tensor %zu to memory @ 0x%p\n", |
| 330 | i, memAddress); |
| 331 | break; |
| 332 | } |
| 333 | } |
| 334 | |
| 335 | info("%zu bytes written to memory @ 0x%p\n", numBytesWritten, memAddress); |
| 336 | |
| 337 | return numBytesWritten; |
| 338 | } |
| 339 | |
| 340 | static void IncrementAppCtxClipIdx(ApplicationContext& ctx) |
| 341 | { |
| 342 | auto curClipIdx = ctx.Get<uint32_t>("clipIndex"); |
| 343 | if (curClipIdx + 1 >= NUMBER_OF_FILES) { |
| 344 | ctx.Set<uint32_t>("clipIndex", 0); |
| 345 | return; |
| 346 | } |
| 347 | ++curClipIdx; |
| 348 | ctx.Set<uint32_t>("clipIndex", curClipIdx); |
| 349 | } |
| 350 | |
| 351 | void QuantizeAndPopulateInput(rnn::vec1D32F& inputFeatures, |
| 352 | const float quantScale, const int quantOffset, TfLiteTensor* inputTensor) |
| 353 | { |
| 354 | const float minVal = std::numeric_limits<int8_t>::min(); |
| 355 | const float maxVal = std::numeric_limits<int8_t>::max(); |
| 356 | |
| 357 | auto* inputTensorData = tflite::GetTensorData<int8_t>(inputTensor); |
| 358 | |
| 359 | for (size_t i=0; i < inputFeatures.size(); ++i) { |
| 360 | float quantValue = ((inputFeatures[i] / quantScale) + quantOffset); |
| 361 | inputTensorData[i] = static_cast<int8_t>(std::min<float>(std::max<float>(quantValue, minVal), maxVal)); |
| 362 | } |
| 363 | } |
| 364 | |
| 365 | |
| 366 | } /* namespace app */ |
| 367 | } /* namespace arm */ |