/*
 * Copyright (c) 2021 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "RNNoiseModel.hpp"
#include "UseCaseHandler.hpp"
#include "InputFiles.hpp"
#include "RNNUCTestCaseData.hpp"
#include "UseCaseCommonUtils.hpp"

#include <catch.hpp>
#include <hal.h>
#include <Profiler.hpp>

#include <cstdint>
#include <cstring>    /* memset, std::memcpy */
#include <functional> /* std::function context entries */
#include <vector>
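
/* Test helper macros: PLATFORM brings up the HAL platform, and CONTEXT creates
 * the application context used by the use case handler, wiring in a profiler and
 * the RNNoiseModel instance named `model` in the enclosing scope. */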
#define PLATFORM hal_platform_init();

#define CONTEXT \
arm::app::ApplicationContext caseContext; \
arm::app::Profiler profiler{"noise_reduction"}; \
caseContext.Set<arm::app::Profiler&>("profiler", profiler); \
caseContext.Set<arm::app::RNNoiseModel&>("model", model);

TEST_CASE("Verify output tensor memory dump")
{
    constexpr size_t maxMemDumpSz = 0x100000; /* 1 MiB worth of space. */
    std::vector<uint8_t> memPool(maxMemDumpSz); /* Memory pool. */
    arm::app::RNNoiseModel model{};

    REQUIRE(model.Init());
    REQUIRE(model.IsInited());

    /* Populate the output tensors. */
    const size_t numOutputs = model.GetNumOutputs();
    size_t sizeToWrite = 0;
    size_t lastTensorSize = model.GetOutputTensor(numOutputs - 1)->bytes;

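    /* Fill each output tensor with a distinct byte value (its own index) so the
     * dumped contents can be checked byte-for-byte afterwards. */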
    for (size_t i = 0; i < numOutputs; ++i) {
        TfLiteTensor* tensor = model.GetOutputTensor(i);
        auto* tData = tflite::GetTensorData<uint8_t>(tensor);

        if (tensor->bytes > 0) {
            memset(tData, static_cast<uint8_t>(i), tensor->bytes);
            sizeToWrite += tensor->bytes;
        }
    }

    SECTION("Positive use case")
    {
        /* Run the memory dump. */
        auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), memPool.size());
        REQUIRE(sizeToWrite == bytesWritten);

        /* Verify the dump: the pool should contain each tensor's bytes in order. */
        size_t k = 0;
        for (size_t i = 0; i < numOutputs && k < memPool.size(); ++i) {
            TfLiteTensor* tensor = model.GetOutputTensor(i);
            auto* tData = tflite::GetTensorData<uint8_t>(tensor);

            for (size_t j = 0; j < tensor->bytes && k < memPool.size(); ++j) {
                REQUIRE(tData[j] == memPool[k++]);
            }
        }
    }

    SECTION("Limited memory - skipping last tensor")
    {
        /* Run the memory dump with one byte less than the full size required:
         * the last tensor no longer fits and should be skipped entirely. */
        auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), sizeToWrite - 1);
        REQUIRE(lastTensorSize > 0);
        REQUIRE(bytesWritten == sizeToWrite - lastTensorSize);
    }

    SECTION("Zero memory")
    {
        /* A zero-sized buffer must result in nothing being written. */
        auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), 0);
        REQUIRE(bytesWritten == 0);
    }
}

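/* End-to-end run of the noise reduction use case over the bundled audio clips. */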
TEST_CASE("Inference run all clips", "[RNNoise]")
{
    PLATFORM

    arm::app::RNNoiseModel model;

    CONTEXT

    caseContext.Set<uint32_t>("clipIndex", 0);
    caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
    caseContext.Set<uint32_t>("frameLength", g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", g_FrameStride);

    /* Load the model. */
    REQUIRE(model.Init());
    REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
}

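/* Returns a size getter that ignores the clip index and always reports the
 * requested number of features; the tests below use it to cap how much of
 * p232_208.wav a single run consumes. */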
std::function<uint32_t(const uint32_t)> get_golden_input_p232_208_array_size(const uint32_t numberOfFeatures) {
    return [numberOfFeatures](const uint32_t) -> uint32_t {
        return numberOfFeatures;
    };
}

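/* Filename getter handed to the context; asserts that the only clip used by
 * these tests is p232_208.wav. */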
const char* get_test_filename(const uint32_t idx) {
    auto name = get_filename(idx);
    REQUIRE(std::string("p232_208.wav") == name);
    return "p232_208.wav";
}

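/* Run the noise reduction handler for each requested inference count and
 * compare the final inference's output, read back from the memory dump,
 * against the golden reference data. */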
void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
    PLATFORM

    arm::app::RNNoiseModel model;

    CONTEXT

    caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", get_audio_array);
    caseContext.Set<std::function<const char*(const uint32_t)>>("featureFileNames", get_test_filename);
    caseContext.Set<uint32_t>("frameLength", g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", g_FrameStride);
    caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
    /* Load the model. */
    REQUIRE(model.Init());

    size_t oneInferenceOutSizeBytes = g_FrameLength * sizeof(int16_t);

    auto infIndex = 0;
    for (auto numInf : numberOfInferences) {
        DYNAMIC_SECTION("Number of inferences: " << numInf) {
            caseContext.Set<uint32_t>("clipIndex", 1); /* Only p232_208.wav is used by these tests. */
            uint32_t audioSizeInput = numInf * g_FrameLength;
            caseContext.Set<std::function<uint32_t(const uint32_t)>>("featureSizes",
                                                                     get_golden_input_p232_208_array_size(audioSizeInput));

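            /* Expected dump layout, as implied by the size calculation below (an
             * assumption drawn from this test rather than from a dump format spec):
             *   [u32 filename length][filename bytes][u32 dump size]
             *   [int16 output samples per inference ...][u32 EOF marker] */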
            size_t headerNumBytes = 4 + 12 + 4; /* Filename length (4), filename (12 for "p232_208.wav"), dump size (4). */
            size_t footerNumBytes = 4; /* EOF value. */
            size_t memDumpMaxLenBytes = headerNumBytes + footerNumBytes + oneInferenceOutSizeBytes * numInf;

            std::vector<uint8_t> memDump(memDumpMaxLenBytes);
            size_t undefMemDumpBytesWritten = 0;
            caseContext.Set<size_t>("MEM_DUMP_LEN", memDumpMaxLenBytes);
            caseContext.Set<uint8_t*>("MEM_DUMP_BASE_ADDR", memDump.data());
            caseContext.Set<size_t*>("MEM_DUMP_BYTE_WRITTEN", &undefMemDumpBytesWritten);

            /* Inference. */
            REQUIRE(arm::app::NoiseReductionHandler(caseContext, false));

            /* The expected output of the last inference after post-processing. */
            std::vector<int16_t> golden(&ofms[infIndex][0], &ofms[infIndex][0] + g_FrameLength);

            size_t startOfLastInfOut = undefMemDumpBytesWritten - oneInferenceOutSizeBytes;

            /* The actual result of the last inference, read back from the end of the memory dump. */
            std::vector<int16_t> runtime(g_FrameLength);
            std::memcpy(runtime.data(), &memDump[startOfLastInfOut], oneInferenceOutSizeBytes);

            /* A margin of 43 on int16_t samples is roughly 0.07% of the full 16-bit range. */
            REQUIRE_THAT(golden, Catch::Matchers::Approx(runtime).margin(43));
        }
        ++infIndex;
    }
}

TEST_CASE("Inference by index - one inference", "[RNNoise]")
{
    auto totalAudioSize = get_audio_array_size(1);
    REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */

    /* Run 1 inference. */
    std::vector<uint32_t> numberOfInferences = {1};
    testInfByIndex(numberOfInferences);
}

TEST_CASE("Inference by index - several inferences", "[RNNoise]")
{
    auto totalAudioSize = get_audio_array_size(1);
    REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */

    /* 3 different inference amounts: 1, 2 and all inferences required to cover the total feature set. */
    uint32_t totalInferences = totalAudioSize / g_FrameLength;
    std::vector<uint32_t> numberOfInferences = {1, 2, totalInferences};
    testInfByIndex(numberOfInferences);
}
199}