/*
 * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "RNNoiseModel.hpp"
#include "UseCaseHandler.hpp"
#include "InputFiles.hpp"
#include "RNNUCTestCaseData.hpp"
#include "BufAttributes.hpp"
#include "hal.h"
#include "Profiler.hpp"

#include <catch.hpp>

#include <cstdint>    /* For uint8_t, int16_t, uint32_t. */
#include <cstring>    /* For memset and std::memcpy. */
#include <functional> /* For std::function. */
#include <vector>     /* For std::vector. */

namespace arm {
namespace app {
    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
    namespace rnn {
        extern uint8_t* GetModelPointer();
        extern size_t GetModelLen();
    } /* namespace rnn */
} /* namespace app */
} /* namespace arm */

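/* Helper macros shared by the test cases below: PLATFORM initialises the HAL,
 * while CONTEXT creates the ApplicationContext, attaches a profiler and
 * registers the model. CONTEXT assumes an arm::app::RNNoiseModel named
 * `model` is already in scope where it is expanded. */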
#define PLATFORM hal_platform_init();

#define CONTEXT \
arm::app::ApplicationContext caseContext; \
arm::app::Profiler profiler{"noise_reduction"}; \
caseContext.Set<arm::app::Profiler&>("profiler", profiler); \
caseContext.Set<arm::app::RNNoiseModel&>("model", model);

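/* Exercises DumpOutputTensorsToMemory() against a locally allocated memory
 * pool: the model's output tensors are filled with known values, dumped, and
 * then checked under normal, limited and zero-size destination buffers. */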
TEST_CASE("Verify output tensor memory dump")
{
    constexpr size_t maxMemDumpSz = 0x100000;   /* 1 MiB worth of space. */
    std::vector<uint8_t> memPool(maxMemDumpSz); /* Memory pool. */
    arm::app::RNNoiseModel model{};

    REQUIRE(model.Init(arm::app::tensorArena,
                       sizeof(arm::app::tensorArena),
                       arm::app::rnn::GetModelPointer(),
                       arm::app::rnn::GetModelLen()));
    REQUIRE(model.IsInited());

    /* Populate the output tensors. */
    const size_t numOutputs = model.GetNumOutputs();
    size_t sizeToWrite = 0;
    size_t lastTensorSize = model.GetOutputTensor(numOutputs - 1)->bytes;

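    /* Fill every output tensor with its own index value so that the dump can
     * be verified tensor by tensor in the sections below. */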
    for (size_t i = 0; i < numOutputs; ++i) {
        TfLiteTensor* tensor = model.GetOutputTensor(i);
        auto* tData = tflite::GetTensorData<uint8_t>(tensor);

        if (tensor->bytes > 0) {
            memset(tData, static_cast<uint8_t>(i), tensor->bytes);
            sizeToWrite += tensor->bytes;
        }
    }

    SECTION("Positive use case")
    {
        /* Run the memory dump. */
        auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), memPool.size());
        REQUIRE(sizeToWrite == bytesWritten);

        /* Verify the dump. */
        size_t k = 0;
        for (size_t i = 0; i < numOutputs && k < memPool.size(); ++i) {
            TfLiteTensor* tensor = model.GetOutputTensor(i);
            auto* tData = tflite::GetTensorData<uint8_t>(tensor);

            for (size_t j = 0; j < tensor->bytes && k < memPool.size(); ++j) {
                REQUIRE(tData[j] == memPool[k++]);
            }
        }
    }

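    /* One byte short of the full output size: the dump routine is expected to
     * skip the last tensor entirely rather than write a truncated copy. */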
    SECTION("Limited memory - skipping last tensor")
    {
        /* Run the memory dump. */
        auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), sizeToWrite - 1);
        REQUIRE(lastTensorSize > 0);
        REQUIRE(bytesWritten == sizeToWrite - lastTensorSize);
    }

    SECTION("Zero memory")
    {
        /* Run the memory dump. */
        auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), 0);
        REQUIRE(bytesWritten == 0);
    }
}

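/* Runs the full noise reduction pipeline over the bundled audio clips,
 * starting at clip index 0. The second argument to NoiseReductionHandler()
 * presumably selects running over all clips rather than a single one. */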
TEST_CASE("Inference run all clips", "[RNNoise]")
{
    PLATFORM

    arm::app::RNNoiseModel model;

    CONTEXT

    caseContext.Set<uint32_t>("clipIndex", 0);
    caseContext.Set<uint32_t>("numInputFeatures", arm::app::rnn::g_NumInputFeatures);
    caseContext.Set<uint32_t>("frameLength", arm::app::rnn::g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", arm::app::rnn::g_FrameStride);

    /* Load the model. */
    REQUIRE(model.Init(arm::app::tensorArena,
                       sizeof(arm::app::tensorArena),
                       arm::app::rnn::GetModelPointer(),
                       arm::app::rnn::GetModelLen()));

    REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
}

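/* Returns a callback that reports the same feature-array size regardless of
 * the index it is queried with; used as the "featureSizes" context entry. */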
std::function<uint32_t(const uint32_t)> get_golden_input_p232_208_array_size(const uint32_t numberOfFeatures) {
    return [numberOfFeatures](const uint32_t) -> uint32_t {
        return numberOfFeatures;
    };
}

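/* The test data is expected to contain a single clip, p232_208.wav; assert
 * that before returning the name to the caller. */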
const char* get_test_filename(const uint32_t idx) {
    auto name = get_filename(idx);
    REQUIRE(std::string("p232_208.wav") == name);
    return "p232_208.wav";
}

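/* Runs the noise reduction use case once for each inference count in
 * numberOfInferences and checks the last inference's output against the
 * golden reference. The memory dump buffer is sized using the layout implied
 * by the header/footer byte counts below: a 4-byte filename length, the
 * filename itself (12 bytes for "p232_208.wav"), a 4-byte dump size, the
 * int16_t output of every inference, then a 4-byte EOF marker; this layout
 * is inferred from the sizes used in this test. */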
void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
    PLATFORM

    arm::app::RNNoiseModel model;

    CONTEXT

    caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", get_audio_array);
    caseContext.Set<std::function<const char*(const uint32_t)>>("featureFileNames", get_test_filename);
    caseContext.Set<uint32_t>("frameLength", arm::app::rnn::g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", arm::app::rnn::g_FrameStride);
    caseContext.Set<uint32_t>("numInputFeatures", arm::app::rnn::g_NumInputFeatures);

    /* Load the model. */
    REQUIRE(model.Init(arm::app::tensorArena,
                       sizeof(arm::app::tensorArena),
                       arm::app::rnn::GetModelPointer(),
                       arm::app::rnn::GetModelLen()));

    size_t oneInferenceOutSizeBytes = arm::app::rnn::g_FrameLength * sizeof(int16_t);

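    /* infIndex selects the row of the golden outputs (ofms) matching the
     * current entry in numberOfInferences. */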
    auto infIndex = 0;
    for (auto numInf: numberOfInferences) {
        DYNAMIC_SECTION("Number of inferences: " << numInf) {
            caseContext.Set<uint32_t>("clipIndex", 1); /* Only getting p232_208.wav for tests. */
            uint32_t audioSizeInput = numInf * arm::app::rnn::g_FrameLength;
            caseContext.Set<std::function<uint32_t(const uint32_t)>>("featureSizes",
                                get_golden_input_p232_208_array_size(audioSizeInput));

            size_t headerNumBytes = 4 + 12 + 4; /* Filename length, filename (12 for p232_208.wav), dump size. */
            size_t footerNumBytes = 4;          /* EOF value. */
            size_t memDumpMaxLenBytes = headerNumBytes + footerNumBytes + oneInferenceOutSizeBytes * numInf;

            std::vector<uint8_t> memDump(memDumpMaxLenBytes);
            size_t undefMemDumpBytesWritten = 0;
            caseContext.Set<size_t>("MEM_DUMP_LEN", memDumpMaxLenBytes);
            caseContext.Set<uint8_t*>("MEM_DUMP_BASE_ADDR", memDump.data());
            caseContext.Set<size_t*>("MEM_DUMP_BYTE_WRITTEN", &undefMemDumpBytesWritten);

            /* Inference. */
            REQUIRE(arm::app::NoiseReductionHandler(caseContext, false));

            /* The expected output after post-processing. */
            std::vector<int16_t> golden(&ofms[infIndex][0],
                                        &ofms[infIndex][0] + arm::app::rnn::g_FrameLength);

            size_t startOfLastInfOut = undefMemDumpBytesWritten - oneInferenceOutSizeBytes;

            /* The actual result from the use case handler. */
            std::vector<int16_t> runtime(arm::app::rnn::g_FrameLength);
            std::memcpy(runtime.data(), &memDump[startOfLastInfOut], oneInferenceOutSizeBytes);

            /* A margin of 43 on int16 samples is roughly 0.07% of the full range. */
            REQUIRE_THAT(golden, Catch::Matchers::Approx(runtime).margin(43));
        }
        ++infIndex;
    }
}

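/* Both "Inference by index" cases pin the size of the input clip first so
 * that a change in the bundled test data fails loudly before any inference
 * results are compared. */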
TEST_CASE("Inference by index - one inference", "[RNNoise]")
{
    auto totalAudioSize = get_audio_array_size(1);
    REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */

    /* Run 1 inference. */
    std::vector<uint32_t> numberOfInferences = {1};
    testInfByIndex(numberOfInferences);
}

TEST_CASE("Inference by index - several inferences", "[RNNoise]")
{
    auto totalAudioSize = get_audio_array_size(1);
    REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */

    /* 3 different inference amounts: 1, 2 and all inferences required to cover the total feature set. */
    uint32_t totalInferences = totalAudioSize / arm::app::rnn::g_FrameLength;
    std::vector<uint32_t> numberOfInferences = {1, 2, totalInferences};
    testInfByIndex(numberOfInferences);
}