blob: a376dd5644e3a1e79aabb92e445ead4a83be1d6d [file] [log] [blame]
Richard Burton00553462021-11-10 16:27:14 +00001/*
2 * Copyright (c) 2021 Arm Limited. All rights reserved.
3 * SPDX-License-Identifier: Apache-2.0
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17#include "RNNoiseModel.hpp"
18#include "UseCaseHandler.hpp"
19#include "InputFiles.hpp"
20#include "RNNUCTestCaseData.hpp"
21#include "UseCaseCommonUtils.hpp"
22
23#include <catch.hpp>
24#include <hal.h>
25#include <Profiler.hpp>
Richard Burton033c9152021-12-07 14:04:44 +000026
/* Declares and initialises the HAL platform and timer objects needed by a
 * test body. Kept as a macro (not a function) so that every TEST_CASE that
 * expands it gets fresh `platform`/`timer` instances in its own scope.
 * Block comments only: a `//` comment inside a multi-line macro would
 * swallow the continuation backslash after line splicing. */
#define PLATFORM \
hal_platform platform; \
platform_timer timer; \
hal_init(&platform, &timer); \
hal_platform_init(&platform);
32
/* Builds the application context used by the use-case handler and registers
 * the profiler, platform and model with it. Expects `platform` (from the
 * PLATFORM macro above) and a `model` variable to already be in scope at
 * the expansion site. */
#define CONTEXT \
arm::app::ApplicationContext caseContext; \
arm::app::Profiler profiler{&platform, "noise_reduction"}; \
caseContext.Set<arm::app::Profiler&>("profiler", profiler); \
caseContext.Set<hal_platform&>("platform", platform); \
caseContext.Set<arm::app::RNNoiseModel&>("model", model);
39
40TEST_CASE("Verify output tensor memory dump")
41{
42 constexpr size_t maxMemDumpSz = 0x100000; /* 1 MiB worth of space */
43 std::vector<uint8_t> memPool(maxMemDumpSz); /* Memory pool */
44 arm::app::RNNoiseModel model{};
45
46 REQUIRE(model.Init());
47 REQUIRE(model.IsInited());
48
49 /* Populate the output tensors */
50 const size_t numOutputs = model.GetNumOutputs();
51 size_t sizeToWrite = 0;
52 size_t lastTensorSize = model.GetOutputTensor(numOutputs - 1)->bytes;
53
54 for (size_t i = 0; i < numOutputs; ++i) {
55 TfLiteTensor* tensor = model.GetOutputTensor(i);
56 auto* tData = tflite::GetTensorData<uint8_t>(tensor);
57
58 if (tensor->bytes > 0) {
59 memset(tData, static_cast<uint8_t>(i), tensor->bytes);
60 sizeToWrite += tensor->bytes;
61 }
62 }
63
64
65 SECTION("Positive use case")
66 {
67 /* Run the memory dump */
68 auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), memPool.size());
69 REQUIRE(sizeToWrite == bytesWritten);
70
71 /* Verify the dump */
72 size_t k = 0;
73 for (size_t i = 0; i < numOutputs && k < memPool.size(); ++i) {
74 TfLiteTensor* tensor = model.GetOutputTensor(i);
75 auto* tData = tflite::GetTensorData<uint8_t>(tensor);
76
77 for (size_t j = 0; j < tensor->bytes && k < memPool.size(); ++j) {
78 REQUIRE(tData[j] == memPool[k++]);
79 }
80 }
81 }
82
83 SECTION("Limited memory - skipping last tensor")
84 {
85 /* Run the memory dump */
86 auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), sizeToWrite - 1);
87 REQUIRE(lastTensorSize > 0);
88 REQUIRE(bytesWritten == sizeToWrite - lastTensorSize);
89 }
90
91 SECTION("Zero memory")
92 {
93 /* Run the memory dump */
94 auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), 0);
95 REQUIRE(bytesWritten == 0);
96 }
97}
98
/* End-to-end smoke test: run the noise-reduction use-case handler over
 * every bundled audio clip (runAll = true) starting from clip 0. */
TEST_CASE("Inference run all clips", "[RNNoise]")
{
    PLATFORM

    arm::app::RNNoiseModel model;

    CONTEXT

    /* Feature geometry matching the generated test data. */
    caseContext.Set<uint32_t>("clipIndex", 0);
    caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
    caseContext.Set<uint32_t>("frameLength", g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", g_FrameStride);

    /* Load the model. */
    REQUIRE(model.Init());

    /* Second argument `true` => iterate over all clips. */
    REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
}
117
/* Returns a feature-size lookup callable for the single golden clip
 * (p232_208.wav): whatever index the handler asks for, the answer is the
 * fixed `numberOfFeatures` captured here. */
std::function<uint32_t(const uint32_t)> get_golden_input_p232_208_array_size(const uint32_t numberOfFeatures) {
    return [numberOfFeatures](const uint32_t /* idx: ignored, one clip only */) {
        return numberOfFeatures;
    };
}
124
125const char* get_test_filename(const uint32_t idx) {
126 auto name = get_filename(idx);
127 REQUIRE(std::string("p232_208.wav") == name);
128 return "p232_208.wav";
129}
130
/* Runs the noise-reduction handler once per entry of `numberOfInferences`
 * and checks the last inference's post-processed output (recovered from the
 * memory dump) against the golden reference `ofms[infIndex]`.
 * Each entry says how many consecutive inferences that run should cover. */
void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
    PLATFORM

    arm::app::RNNoiseModel model;

    CONTEXT

    caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", get_audio_array);
    caseContext.Set<std::function<const char* (const uint32_t)>>("featureFileNames", get_test_filename);
    caseContext.Set<uint32_t>("frameLength", g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", g_FrameStride);
    caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
    /* Load the model. */
    REQUIRE(model.Init());

    /* Bytes produced by one inference: one frame of int16 samples. */
    size_t oneInferenceOutSizeBytes = g_FrameLength * sizeof(int16_t);

    auto infIndex = 0;
    for (auto numInf: numberOfInferences) {
        DYNAMIC_SECTION("Number of features: "<< numInf) {
            caseContext.Set<uint32_t>("clipIndex", 1); /* Only getting p232_208.wav for tests. */
            /* Restrict the reported audio size so the handler performs
             * exactly `numInf` inferences. */
            uint32_t audioSizeInput = numInf*g_FrameLength;
            caseContext.Set<std::function<uint32_t(const uint32_t)>>("featureSizes",
                                                                    get_golden_input_p232_208_array_size(audioSizeInput));

            /* Size the dump buffer to the handler's on-wire layout:
             * header + all inference outputs + footer. */
            size_t headerNumBytes = 4 + 12 + 4; /* Filename length, filename (12 for p232_208.wav), dump size. */
            size_t footerNumBytes = 4; /* Eof value. */
            size_t memDumpMaxLenBytes = headerNumBytes + footerNumBytes + oneInferenceOutSizeBytes * numInf;

            std::vector<uint8_t > memDump(memDumpMaxLenBytes);
            size_t undefMemDumpBytesWritten = 0;
            caseContext.Set<size_t>("MEM_DUMP_LEN", memDumpMaxLenBytes);
            caseContext.Set<uint8_t*>("MEM_DUMP_BASE_ADDR", memDump.data());
            caseContext.Set<size_t*>("MEM_DUMP_BYTE_WRITTEN", &undefMemDumpBytesWritten);

            /* Inference. */
            REQUIRE(arm::app::NoiseReductionHandler(caseContext, false));

            /* The expected output after post-processing. */
            std::vector<int16_t> golden(&ofms[infIndex][0], &ofms[infIndex][0] + g_FrameLength);

            /* The last inference's output sits at the tail of what was
             * actually written (before the footer offset accounting). */
            size_t startOfLastInfOut = undefMemDumpBytesWritten - oneInferenceOutSizeBytes;

            /* The actual result from the usecase handler. */
            std::vector<int16_t> runtime(g_FrameLength);
            std::memcpy(runtime.data(), &memDump[startOfLastInfOut], oneInferenceOutSizeBytes);

            /* Margin of 43 is 0.07% error. */
            REQUIRE_THAT(golden, Catch::Matchers::Approx(runtime).margin(43));
        }
        /* Each run has its own golden reference row. */
        ++infIndex;
    }
}
184
185TEST_CASE("Inference by index - one inference", "[RNNoise]")
186{
187 auto totalAudioSize = get_audio_array_size(1);
188 REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
189
190 /* Run 1 inference */
191 std::vector<uint32_t> numberOfInferences = {1};
192 testInfByIndex(numberOfInferences);
193}
194
195TEST_CASE("Inference by index - several inferences", "[RNNoise]")
196{
197 auto totalAudioSize = get_audio_array_size(1);
198 REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
199
200 /* 3 different inference amounts: 1, 2 and all inferences required to cover total feature set */
201 uint32_t totalInferences = totalAudioSize / g_FrameLength;
202 std::vector<uint32_t> numberOfInferences = {1, 2, totalInferences};
203 testInfByIndex(numberOfInferences);
204}