//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ActivationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/ActivationFixture.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/multi_array.hpp>

#include <algorithm>

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    IgnoreUnused(memoryManager);
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

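    // For quantized data types, real values relate to stored values as real = scale * (quantized - offset),
    // so the scale/offset pairs below define how the reference data is interpreted.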
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

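    // For BoundedReLu the descriptor's m_A carries the upper bound and m_B the lower bound.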
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -2.0f, 0.1f, 0.5f, 1.25f,
        0.786f, 0.9875f, -1.5f, 0.384f,
        1.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 2.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 1.0f,
        0.786f, 0.9875f, -1.0f, 0.384f,
        1.0f, 1.0f, 1.0f, 0.896f,
        1.0f, 1.0f, 0.3f, 0.15f,
        0.999f, 1.0f, 0.89f, 1.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 6.25f,
        0.786f, 5.9875f, -0.5f, 0.384f,
        6.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 12.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        0.0f, 0.1f, 0.5f, 6.0f,
        0.786f, 5.9875f, 0.0f, 0.384f,
        6.0f, 3.5f, 6.0f, 0.896f,
        2.126f, 6.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 124, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        0, 122, 0,
        255, 0, 58
    };

    float inputScale = 12.0f / 255.0f;
    int32_t inputOffset = 63;
    float outputScale = 6.0f / 255.0f;
    int32_t outputOffset = 0;
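    // With this quantization the input tensor covers real values of roughly [-3.0f, 9.0f],
    // while the output covers exactly [0.0f, 6.0f], so the 6.0f upper bound uses the full uint8 range.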

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 6.0f, 0.0f,
        inputScale, inputOffset, outputScale, outputOffset,
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 230, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        51, 192, 32,
        192, 32, 92
    };

    int32_t inputOffset = 112;
    float inputScale = 0.0125f;
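    // Input and output share the same quantization, so the +/-1.0f bounds correspond to
    // quantized values 192 and 32 ((+/-1.0f / 0.0125f) + 112).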

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 1.0f, -1.0f,
        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

namespace
{

struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                 armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                 armnn::DataType::Float32);
    }
};

boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound].
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}

} // namespace

LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}

LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 4.0f, 3);
}

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation descriptor.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Quantize the expected reference data with the output scale and offset.
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData =
    {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

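    // The sigmoid output lies in [0, 1], so a fixed output quantization of scale 1/256 with
    // zero offset is used regardless of the input quantization passed in.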
    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sigmoid,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           1.f / 256.f,
                                           0,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
}

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::fmax(0.0f, value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::ReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> ReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}


LayerTestResult<uint8_t, 4> ReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<float, 4> ReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    const float a = 1.0f;
    const float b = -1.0f;
    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return std::min(a, std::max(b, value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::BoundedReLu,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return BoundedReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}



template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SoftReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::log(1.0f + std::exp(value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::SoftReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SoftReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SoftReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
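    // The negative-slope coefficient is forwarded to the workload as activation parameter A below.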
    // Calculate output values for input.
    auto f = [a](float value)
    {
        return value > 0.0f ? value : (value * a);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::LeakyReLu,
                                           a,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> LeakyReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AbsTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::abs(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Abs,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> AbsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> AbsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> AbsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<float, 5> SqrtNNTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);
    const int inputDataSize = 120;
    std::vector<float> inputData(inputDataSize);

    for (unsigned int i = 0u; i < inputDataSize; ++i)
    {
        inputData[i] = static_cast<float>(i) / 10;
    }

    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputDataSize);
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    armnn::TensorInfo inputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);

    LayerTestResult<float, 5> result(inputTensorInfo);

    auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());

    // Calculated manually.
    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);

    return result;
};

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SqrtTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        0.1f, 0.2f, 0.3f, 0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        1.0f, 2.0f, 3.0f, 4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sqrt,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SqrtTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SqrtUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SqrtInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SquareTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::pow(value, 2);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Square,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SquareTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SquareUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SquareInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TanhTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 2.0f;
    const float b = 3.0f;
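    // For TanH the computed activation is a * tanh(b * x), with a and b passed as parameters A and B.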
    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return a * tanhf(b * value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::TanH,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> TanhTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> TanhUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}

LayerTestResult<int16_t, 4> TanhInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> EluTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };


    const float a = 0.01f;
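    // Alpha for the negative branch of ELU is passed as activation parameter A.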
    // Calculate output values for input.
    auto f = [a](float value)
    {
        return (value >= 0) ? value : a * (expf(value) - 1);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Elu,
                                           a,
                                           0.0f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> EluTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> EluUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}

LayerTestResult<int16_t, 4> EluInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> HardSwishTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    // Calculate output values for input.
    auto f = [](float x)
    {
        // Break down the calculation to help with verification.
        // hard_swish(x) = x * relu6(x+3) / 6
        // relu6(x) = min(max(x,0),6)
        float reLu6_step1 = std::max((x + 3), 0.0f);
        float reLu6Complete = std::min(reLu6_step1, 6.0f);
        float hardSwish_step1 = x * reLu6Complete;
        float result = hardSwish_step1 / 6;
        return result;
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::HardSwish,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> HardSwishTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> HardSwishUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}

LayerTestResult<int16_t, 4> HardSwishInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    float minVal = -10.f;
    if (f == armnn::ActivationFunction::Sqrt)
    {
        minVal = 0.f;
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);


    LayerTestResult<T,4> ret(outputTensorInfo);
    auto boostArrayExtents = boost::extents
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);


    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    ARMNN_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    ARMNN_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

LayerTestResult<float,4> CompareActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize)
{
    return CompareActivationTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
}

LayerTestResult<uint8_t,4> CompareActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}

LayerTestResult<int16_t,4> CompareActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
}