blob: c05dfd61704e7cae708c4e851db4d369a1a307a7 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
telsoa014fcda012018-03-09 14:13:49 +00005
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01006#include "ActivationTestImpl.hpp"
7
8#include <ResolveType.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00009
telsoa014fcda012018-03-09 14:13:49 +000010#include <armnn/ArmNN.hpp>
telsoa014fcda012018-03-09 14:13:49 +000011
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010012#include <backendsCommon/test/ActivationFixture.hpp>
13#include <backendsCommon/test/QuantizeHelper.hpp>
14#include <backendsCommon/test/TensorCopyUtils.hpp>
15#include <backendsCommon/test/WorkloadTestUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000017#include <test/TensorHelpers.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010019#include <boost/multi_array.hpp>
20
telsoa014fcda012018-03-09 14:13:49 +000021#include <algorithm>
22
// Shared implementation for the BoundedReLu tests below.
// Builds 4D tensors of shape { inputBatchSize, inputChannels, inputHeight, inputWidth },
// runs a BoundedReLu activation workload with m_A = upperBound and m_B = lowerBound on the
// given workload factory, and returns the computed output alongside the caller-supplied
// expected output. inputScale/inputOffset and outputScale/outputOffset are only applied
// when T is a quantized type.
// NOTE(review): memoryManager is not referenced here — presumably kept so all test
// helpers share a uniform signature.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    // The activation is element-wise, so the output shape matches the input shape.
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Quantization parameters are only meaningful for quantized data types.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Setup bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}
90
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000091LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
92 armnn::IWorkloadFactory& workloadFactory,
93 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +000094{
95 unsigned int inputWidth = 4u;
96 unsigned int inputHeight = 5u;
97 unsigned int inputChannels = 1u;
98 unsigned int inputBatchSize = 1;
99
100 std::vector<float> input = std::vector<float>{
101 -2.0f, 0.1f, 0.5f, 1.25f,
102 0.786f, 0.9875f, -1.5f, 0.384f,
103 1.0001f, 3.5f, 7.5f, 0.896f,
104 2.126f, 2.0f, 0.3f, 0.15f,
105 0.999f, 1.2f, 0.89f, 6.1f,
106 };
107
telsoa01c577f2c2018-08-31 09:22:23 +0100108 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000109 std::vector<float> output = std::vector<float>{
110 -1.0f, 0.1f, 0.5f, 1.0f,
111 0.786f, 0.9875f, -1.0f, 0.384f,
112 1.0f, 1.0f, 1.0f, 0.896f,
113 1.0f, 1.0f, 0.3f, 0.15f,
114 0.999f, 1.0f, 0.89f, 1.0f,
115 };
116
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000117 return BoundedReLuTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000118 workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
119 inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000120}
121
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000122LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
123 armnn::IWorkloadFactory& workloadFactory,
124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000125{
126 unsigned int inputWidth = 4u;
127 unsigned int inputHeight = 5u;
128 unsigned int inputChannels = 1u;
129 unsigned int inputBatchSize = 1;
130
131 std::vector<float> input = std::vector<float>{
132 -1.0f, 0.1f, 0.5f, 6.25f,
133 0.786f, 5.9875f, -0.5f, 0.384f,
134 6.0001f, 3.5f, 7.5f, 0.896f,
135 2.126f, 12.0f, 0.3f, 0.15f,
136 0.999f, 1.2f, 0.89f, 6.1f,
137 };
138
David Beckac42efd2018-09-26 17:41:13 +0100139 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000140 std::vector<float> output = std::vector<float>{
141 0.0f, 0.1f, 0.5f, 6.0f,
142 0.786f, 5.9875f, 0.0f, 0.384f,
143 6.0f, 3.5f, 6.0f, 0.896f,
144 2.126f, 6.0f, 0.3f, 0.15f,
145 0.999f, 1.2f, 0.89f, 6.0f,
146 };
147
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000148 return BoundedReLuTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000149 workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
150 inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000151}
152
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000153LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
154 armnn::IWorkloadFactory& workloadFactory,
155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000156{
157 unsigned int inputWidth = 3u;
158 unsigned int inputHeight = 2u;
159 unsigned int inputChannels = 1u;
160 unsigned int inputBatchSize = 1;
161
162 std::vector<uint8_t> input = std::vector<uint8_t>{
163 51, 124, 28,
164 251, 8, 92
165 };
166
David Beckac42efd2018-09-26 17:41:13 +0100167 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000168 std::vector<uint8_t> output = std::vector<uint8_t>{
169 0, 122, 0,
170 255, 0, 58
171 };
172
173 float inputScale = 12.0f / 255.0f;
174 int32_t inputOffset = 63;
175 float outputScale = 6.0f / 255.0f;
176 int32_t outputOffset = 0;
177
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000178 return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
179 workloadFactory, memoryManager, 6.0f, 0.0f,
180 inputScale, inputOffset, outputScale, outputOffset,
181 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000182}
183
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000184LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
185 armnn::IWorkloadFactory& workloadFactory,
186 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000187{
188 unsigned int inputWidth = 3u;
189 unsigned int inputHeight = 2u;
190 unsigned int inputChannels = 1u;
191 unsigned int inputBatchSize = 1;
192
193 std::vector<uint8_t> input = std::vector<uint8_t>{
194 51, 230, 28,
195 251, 8, 92
196 };
197
telsoa01c577f2c2018-08-31 09:22:23 +0100198 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000199 std::vector<uint8_t> output = std::vector<uint8_t>{
200 51, 192, 32,
201 192, 32, 92
202 };
203
204 int32_t inputOffset = 112;
205 float inputScale = 0.0125f;
206
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000207 return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
208 workloadFactory, memoryManager, 1.0f, -1.0f,
209 inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
210 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000211}
212
213namespace
214{
215
// Fixed Float32 tensor dimensions shared by the random-input BoundedReLu comparison
// tests below (BoundedReLuRandomInputTest / CompareBoundedReLuTest).
struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    // Element-wise activation: output dimensions mirror the input.
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    // NCHW tensor info for the input side.
    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
            armnn::DataType::Float32);
    }

    // NCHW tensor info for the output side.
    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
            armnn::DataType::Float32);
    }
};
240
// Runs an activation workload (configured via activationDescriptor, typically BoundedReLu)
// over deterministic pseudo-random input (fixed seed, so repeatable across runs) and
// returns the raw output tensor. Used by CompareBoundedReLuTest to compare two backends.
// NOTE(review): memoryManager is not referenced in this helper — presumably kept so all
// test helpers share a uniform signature.
boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound] so that clamping behaviour is actually exercised.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}
280
281} // namespace
282
// Compares a backend's BoundedReLu output against a reference workload factory's output,
// feeding both the same deterministic random input (same seed inside
// BoundedReLuRandomInputTest).
LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

    // Both runs use 0.0f as the lower limit for the random input range; the reference
    // run is given a null memory manager.
    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);

    return result;
}
304
// Runs a Linear activation with m_A = 1 and m_B = 0 (the identity function) over
// deterministic random input and checks that the output equals the input.
// qScale/qOffset are only applied when T is a quantized type; the 0.0f default scale
// is only valid for non-quantized instantiations — quantized callers must pass a
// proper scale.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // NCHW layout; input and output share the same shape.
    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    // Fixed seed keeps the random input reproducible across runs.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}
365
// Identity (Linear a=1, b=0) activation on Float32 data — no quantization parameters needed.
LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
372
// Identity (Linear a=1, b=0) activation on QAsymm8 data, scale 4.0 and offset 3.
LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 4.0f, 3);
}
380
// Identity (Linear a=1, b=0) activation on QSymm16 data, scale 0.1 and zero offset.
LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, 0.1f, 0);
}
388
// Generic single-activation test harness: quantizes inputData with (scale, offset),
// runs the given activation function with parameters A/B on a fixed 1x1x1x16 tensor,
// and compares against outputExpectedData quantized with (outScale, outOffset).
// Callers must supply exactly inputWidth (16) values.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    // Element-wise activation: output shape mirrors the input shape.
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload (function and A/B parameters come from the caller).
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Expected output, quantized with the output-side parameters.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
                                                                                  outputExpectedData));

    return result;
}
459
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000460template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000461LayerTestResult<T, 4> SimpleSigmoidTestCommon(
462 armnn::IWorkloadFactory& workloadFactory,
463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
464 float qScale,
465 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000466{
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100467 std::vector<float> inputData =
468 {
telsoa014fcda012018-03-09 14:13:49 +0000469 -0.1f, -0.2f, -0.3f, -0.4f,
470 0.1f, 0.2f, 0.3f, 0.4f,
471 -1.0f, -2.0f, -3.0f, -4.0f,
472 1.0f, 2.0f, 3.0f, 4.0f
473 };
474
telsoa01c577f2c2018-08-31 09:22:23 +0100475 // Calculate output values for input.
telsoa014fcda012018-03-09 14:13:49 +0000476 auto f = [](float value)
477 {
478 return 1.0f / (1.0f + std::exp(-value));
479 };
480 std::vector<float> outputExpectedData(inputData.size());
481 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
482
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000483 return SimpleActivationTest<ArmnnType>(workloadFactory,
484 memoryManager,
485 armnn::ActivationFunction::Sigmoid,
486 0.f,
487 0.f,
488 qScale,
489 qOffset,
490 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100491 1.f / 256.f,
492 0,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000493 outputExpectedData);
telsoa014fcda012018-03-09 14:13:49 +0000494}
495
// Sigmoid on Float32 data (quantization parameters unused).
LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
502
// Sigmoid on QAsymm8 data: input scale 0.1, input offset 50.
LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
}
509
// Sigmoid on QSymm16 data: input scale 0.1, zero offset.
LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
516
517template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
518LayerTestResult<T, 4> ReLuTestCommon(
519 armnn::IWorkloadFactory& workloadFactory,
520 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
521 float qScale,
522 int32_t qOffset)
523{
524 std::vector<float> inputData = {
525 -0.1f, -0.2f, -0.3f, -0.4f,
526 0.1f, 0.2f, 0.3f, 0.4f,
527 -1.0f, -2.0f, -3.0f, -4.0f,
528 1.0f, 2.0f, 3.0f, 4.0f
529 };
530
531 // Calculate output values for input.
532 auto f = [](float value)
533 {
534 return std::fmax(0.0f, value);
535 };
536 std::vector<float> outputExpectedData(inputData.size());
537 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
538
539 return SimpleActivationTest<ArmnnType>(workloadFactory,
540 memoryManager,
541 armnn::ActivationFunction::ReLu,
542 0.f,
543 0.f,
544 qScale,
545 qOffset,
546 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100547 qScale,
548 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100549 outputExpectedData);
550}
551
// ReLu on QSymm16 data: scale 0.1, zero offset.
LayerTestResult<int16_t, 4> ReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
558
559
// ReLu on QAsymm8 data: scale 0.1, zero offset.
LayerTestResult<uint8_t, 4> ReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 0);
}
566
// ReLu on Float32 data (quantization parameters are ignored for float).
LayerTestResult<float, 4> ReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}
573
574
Teresa Charlin18515e22019-04-24 10:17:46 +0100575template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
576LayerTestResult<T, 4> BoundedReLuTestCommon(
577 armnn::IWorkloadFactory& workloadFactory,
578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
579 float qScale,
580 int32_t qOffset)
581{
582 std::vector<float> inputData = {
583 -0.1f, -0.2f, -0.3f, -0.4f,
584 0.1f, 0.2f, 0.3f, 0.4f,
585 -1.0f, -2.0f, -3.0f, -4.0f,
586 1.0f, 2.0f, 3.0f, 4.0f
587 };
588 const float a = 1.0f;
589 const float b = -1.0f;
590 // Calculate output values for input.
591 auto f = [a, b](float value)
592 {
593 return std::min(a, std::max(b, value));
594 };
595 std::vector<float> outputExpectedData(inputData.size());
596 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
597
598 return SimpleActivationTest<ArmnnType>(workloadFactory,
599 memoryManager,
600 armnn::ActivationFunction::BoundedReLu,
601 a,
602 b,
603 qScale,
604 qOffset,
605 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100606 qScale,
607 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100608 outputExpectedData);
609}
610
611LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
612 armnn::IWorkloadFactory& workloadFactory,
613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
614{
615 return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
616}
617
618
619
620template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
621LayerTestResult<T, 4> SoftReLuTestCommon(
622 armnn::IWorkloadFactory& workloadFactory,
623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
624 float qScale,
625 int32_t qOffset)
626{
627 std::vector<float> inputData = {
628 -0.1f, -0.2f, -0.3f, -0.4f,
629 0.1f, 0.2f, 0.3f, 0.4f,
630 -1.0f, -2.0f, -3.0f, -4.0f,
631 1.0f, 2.0f, 3.0f, 4.0f
632 };
633
634 // Calculate output values for input.
635 auto f = [](float value)
636 {
637 return std::log(1.0f + std::exp(value));
638 };
639 std::vector<float> outputExpectedData(inputData.size());
640 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
641
642 return SimpleActivationTest<ArmnnType>(workloadFactory,
643 memoryManager,
644 armnn::ActivationFunction::SoftReLu,
645 0.f,
646 0.f,
647 qScale,
648 qOffset,
649 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100650 qScale,
651 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100652 outputExpectedData);
653}
654
// SoftReLu on Float32 data (quantization parameters are ignored for float).
LayerTestResult<float, 4> SoftReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}
661
// SoftReLu on QAsymm8 data: scale 0.0625, offset 64.
LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
}
668
// SoftReLu on QSymm16 data: scale 0.1, zero offset.
LayerTestResult<int16_t, 4> SoftReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
675
676template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
677LayerTestResult<T, 4> LeakyReLuTestCommon(
678 armnn::IWorkloadFactory& workloadFactory,
679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
680 float qScale,
681 int32_t qOffset)
682{
683 std::vector<float> inputData = {
684 -0.1f, -0.2f, -0.3f, -0.4f,
685 0.1f, 0.2f, 0.3f, 0.4f,
686 -1.0f, -2.0f, -3.0f, -4.0f,
687 1.0f, 2.0f, 3.0f, 4.0f
688 };
689
690 const float a = 0.01f;
691 // Calculate output values for input.
692 auto f = [a](float value)
693 {
694 return value > 0.0f ? value : (value * a);
695 };
696 std::vector<float> outputExpectedData(inputData.size());
697 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
698
699 return SimpleActivationTest<ArmnnType>(workloadFactory,
700 memoryManager,
701 armnn::ActivationFunction::LeakyReLu,
702 a,
703 0.f,
704 qScale,
705 qOffset,
706 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100707 qScale,
708 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100709 outputExpectedData);
710}
711
// LeakyReLu on Float32 data (quantization parameters are ignored for float).
LayerTestResult<float, 4> LeakyReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}
718
// LeakyReLu on QAsymm8 data: scale 0.0625, offset 64.
LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
}
725
// LeakyReLu on QSymm16 data: scale 0.1, zero offset.
LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
732
733template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
734LayerTestResult<T, 4> AbsTestCommon(
735 armnn::IWorkloadFactory& workloadFactory,
736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
737 float qScale,
738 int32_t qOffset)
739{
740 std::vector<float> inputData = {
741 -0.1f, -0.2f, -0.3f, -0.4f,
742 0.1f, 0.2f, 0.3f, 0.4f,
743 -1.0f, -2.0f, -3.0f, -4.0f,
744 1.0f, 2.0f, 3.0f, 4.0f
745 };
746
747 // Calculate output values for input.
748 auto f = [](float value)
749 {
750 return std::abs(value);
751 };
752 std::vector<float> outputExpectedData(inputData.size());
753 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
754
755 return SimpleActivationTest<ArmnnType>(workloadFactory,
756 memoryManager,
757 armnn::ActivationFunction::Abs,
758 0.f,
759 0.f,
760 qScale,
761 qOffset,
762 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100763 qScale,
764 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100765 outputExpectedData);
766}
767
konsof017f6db402019-06-07 15:15:58 +0100768LayerTestResult<float, 4> AbsTest(
769 armnn::IWorkloadFactory& workloadFactory,
770 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
771{
772 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
773}
774
775LayerTestResult<uint8_t, 4> AbsUint8Test(
776 armnn::IWorkloadFactory& workloadFactory,
777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
778{
779 return AbsTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
780}
781
Teresa Charlin18515e22019-04-24 10:17:46 +0100782LayerTestResult<int16_t, 4> AbsInt16Test(
783 armnn::IWorkloadFactory& workloadFactory,
784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
785{
786 return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
787}
788
789template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
790LayerTestResult<T, 4> SqrtTestCommon(
791 armnn::IWorkloadFactory& workloadFactory,
792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
793 float qScale,
794 int32_t qOffset)
795{
796 std::vector<float> inputData = {
797 0.1f, 0.2f, 0.3f, 0.4f,
798 0.1f, 0.2f, 0.3f, 0.4f,
799 1.0f, 2.0f, 3.0f, 4.0f,
800 1.0f, 2.0f, 3.0f, 4.0f
801 };
802
803 // Calculate output values for input.
804 auto f = [](float value)
805 {
806 return std::sqrt(value);
807 };
808 std::vector<float> outputExpectedData(inputData.size());
809 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
810
811 return SimpleActivationTest<ArmnnType>(workloadFactory,
812 memoryManager,
813 armnn::ActivationFunction::Sqrt,
814 0.f,
815 0.f,
816 qScale,
817 qOffset,
818 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100819 qScale,
820 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100821 outputExpectedData);
822}
823
konsof017f6db402019-06-07 15:15:58 +0100824LayerTestResult<float, 4> SqrtTest(
825 armnn::IWorkloadFactory& workloadFactory,
826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
827{
828 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
829}
830
831LayerTestResult<uint8_t, 4> SqrtUint8Test(
832 armnn::IWorkloadFactory& workloadFactory,
833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
834{
835 return SqrtTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
836}
837
Teresa Charlin18515e22019-04-24 10:17:46 +0100838LayerTestResult<int16_t, 4> SqrtInt16Test(
839 armnn::IWorkloadFactory& workloadFactory,
840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
841{
842 return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
843}
844
845template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
846LayerTestResult<T, 4> SquareTestCommon(
847 armnn::IWorkloadFactory& workloadFactory,
848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
849 float qScale,
850 int32_t qOffset)
851{
852 std::vector<float> inputData = {
853 -0.1f, -0.2f, -0.3f, -0.4f,
854 0.1f, 0.2f, 0.3f, 0.4f,
855 -1.0f, -2.0f, -3.0f, -4.0f,
856 1.0f, 2.0f, 3.0f, 4.0f
857 };
858
859 // Calculate output values for input.
860 auto f = [](float value)
861 {
862 return std::pow(value,2);
863 };
864 std::vector<float> outputExpectedData(inputData.size());
865 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
866
867 return SimpleActivationTest<ArmnnType>(workloadFactory,
868 memoryManager,
869 armnn::ActivationFunction::Square,
870 0.f,
871 0.f,
872 qScale,
873 qOffset,
874 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100875 qScale,
876 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100877 outputExpectedData);
878}
879
konsof017f6db402019-06-07 15:15:58 +0100880LayerTestResult<float, 4> SquareTest(
881 armnn::IWorkloadFactory& workloadFactory,
882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
883{
884 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
885}
886
887LayerTestResult<uint8_t, 4> SquareUint8Test(
888 armnn::IWorkloadFactory& workloadFactory,
889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
890{
891 return SquareTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
892}
893
Teresa Charlin18515e22019-04-24 10:17:46 +0100894LayerTestResult<int16_t, 4> SquareInt16Test(
895 armnn::IWorkloadFactory& workloadFactory,
896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
897{
898 return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
899}
900
901template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
902LayerTestResult<T, 4> TanhTestCommon(
903 armnn::IWorkloadFactory& workloadFactory,
904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
905 float qScale,
906 int32_t qOffset)
907{
908 std::vector<float> inputData = {
909 -0.1f, -0.2f, -0.3f, -0.4f,
910 0.1f, 0.2f, 0.3f, 0.4f,
911 -1.0f, -2.0f, -3.0f, -4.0f,
912 1.0f, 2.0f, 3.0f, 4.0f
913 };
914
915 const float a = 2.0f;
916 const float b = 3.0f;
917 // Calculate output values for input.
918 auto f = [a, b](float value)
919 {
920 return a * tanhf(b * value);
921 };
922 std::vector<float> outputExpectedData(inputData.size());
923 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
924
925 return SimpleActivationTest<ArmnnType>(workloadFactory,
926 memoryManager,
927 armnn::ActivationFunction::TanH,
928 a,
929 b,
930 qScale,
931 qOffset,
932 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100933 qScale,
934 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100935 outputExpectedData);
936}
937
konsof017f6db402019-06-07 15:15:58 +0100938LayerTestResult<float, 4> TanhTest(
939 armnn::IWorkloadFactory& workloadFactory,
940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
941{
942 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
943}
944
945LayerTestResult<uint8_t, 4> TanhUint8Test(
946 armnn::IWorkloadFactory& workloadFactory,
947 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
948{
949 return TanhTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 64);
950}
951
Teresa Charlin18515e22019-04-24 10:17:46 +0100952LayerTestResult<int16_t, 4> TanhInt16Test(
953 armnn::IWorkloadFactory& workloadFactory,
954 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
955{
956 return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
957}
958
959
960
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000961template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000962LayerTestResult<T,4> CompareActivationTestImpl(
963 armnn::IWorkloadFactory& workloadFactory,
964 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
965 armnn::IWorkloadFactory& refWorkloadFactory,
966 armnn::ActivationFunction f,
967 unsigned int batchSize = 5,
968 float qScale = 0.0f,
969 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000970{
971 unsigned int width = 17;
972 unsigned int height = 29;
973 unsigned int channels = 2;
974
975 float a = 0.234f;
976 float b = -12.345f;
977
978 armnn::TensorInfo inputTensorInfo;
979 armnn::TensorInfo outputTensorInfo;
980
981 unsigned int shape[] = {batchSize, channels, height, width};
982
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000983 inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
984 outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000985
986 // Set quantization parameters if the requested type is a quantized type.
987 if(armnn::IsQuantizedType<T>())
988 {
989 inputTensorInfo.SetQuantizationScale(qScale);
990 inputTensorInfo.SetQuantizationOffset(qOffset);
991 outputTensorInfo.SetQuantizationScale(qScale);
992 outputTensorInfo.SetQuantizationOffset(qOffset);
993 }
994
995 float minVal = -10.f;
996 if (f == armnn::ActivationFunction::Sqrt)
997 {
998 minVal = 0.f;
999 }
1000
1001 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1002
1003
1004 LayerTestResult<T,4> ret(outputTensorInfo);
1005 auto boostArrayExtents = boost::extents
1006 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
1007 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1008 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
1009 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1010 ret.output.resize(boostArrayExtents);
1011 ret.outputExpected.resize(boostArrayExtents);
1012
1013
1014 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1015 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1016
1017 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1018 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1019
1020 armnn::ActivationQueueDescriptor data;
1021 armnn::WorkloadInfo info;
1022 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1023 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1024 data.m_Parameters.m_A = a;
1025 data.m_Parameters.m_B = b;
1026 data.m_Parameters.m_Function = f;
1027
1028 armnn::ActivationQueueDescriptor refData = data;
1029 armnn::WorkloadInfo refInfo = info;
1030 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1031 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1032
1033 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
1034 BOOST_ASSERT(workload != nullptr);
1035 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
1036 BOOST_ASSERT(workloadRef != nullptr);
1037
1038 inputHandle->Allocate();
1039 outputHandle->Allocate();
1040 inputHandleRef->Allocate();
1041 outputHandleRef->Allocate();
1042
1043 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1044 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1045
1046 workload->Execute();
1047 workloadRef->Execute();
1048
1049 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1050 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1051
1052 return ret;
1053}
1054
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001055LayerTestResult<float,4> CompareActivationTest(
1056 armnn::IWorkloadFactory& workloadFactory,
1057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1058 armnn::IWorkloadFactory& refWorkloadFactory,
1059 armnn::ActivationFunction f,
1060 unsigned int batchSize)
telsoa014fcda012018-03-09 14:13:49 +00001061{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001062 return CompareActivationTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001063 workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
telsoa014fcda012018-03-09 14:13:49 +00001064}
1065
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001066LayerTestResult<uint8_t,4> CompareActivationUint8Test(
1067 armnn::IWorkloadFactory& workloadFactory,
1068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1069 armnn::IWorkloadFactory& refWorkloadFactory,
1070 armnn::ActivationFunction f)
telsoa014fcda012018-03-09 14:13:49 +00001071{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001072 return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001073 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
telsoa014fcda012018-03-09 14:13:49 +00001074}
Teresa Charlin18515e22019-04-24 10:17:46 +01001075
1076LayerTestResult<int16_t,4> CompareActivationInt16Test(
1077 armnn::IWorkloadFactory& workloadFactory,
1078 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1079 armnn::IWorkloadFactory& refWorkloadFactory,
1080 armnn::ActivationFunction f)
1081{
1082 return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
1083 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
1084}