blob: 282e6438d05ae5813111af28321b43386e0a6e8f [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007#include "ActivationFixture.hpp"
8#include "QuantizeHelper.hpp"
9
telsoa014fcda012018-03-09 14:13:49 +000010#include <armnn/ArmNN.hpp>
11#include <armnn/Tensor.hpp>
12#include <armnn/TypesUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000013
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000014#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000015#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000016#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000017
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000018#include <test/TensorHelpers.hpp>
telsoa014fcda012018-03-09 14:13:49 +000019
20#include <algorithm>
21
// Runs a single BoundedReLu activation workload over one 4D (NCHW) tensor and
// returns both the actual output and the caller-supplied expected output for
// later comparison.
//
// upperBound / lowerBound map onto ActivationDescriptor::m_A / m_B.
// The scale/offset pairs are applied to the tensor infos only when T is a
// quantized type; for float types they are ignored.
// NOTE(review): memoryManager is accepted for signature uniformity with the
// other test helpers but is not referenced in this function.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    // An activation is element-wise, so the output shape equals the input shape.
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Quantization parameters are only meaningful for quantized data types.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu: m_A is the upper bound, m_B the lower bound.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}
89
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000090LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
91 armnn::IWorkloadFactory& workloadFactory,
92 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +000093{
94 unsigned int inputWidth = 4u;
95 unsigned int inputHeight = 5u;
96 unsigned int inputChannels = 1u;
97 unsigned int inputBatchSize = 1;
98
99 std::vector<float> input = std::vector<float>{
100 -2.0f, 0.1f, 0.5f, 1.25f,
101 0.786f, 0.9875f, -1.5f, 0.384f,
102 1.0001f, 3.5f, 7.5f, 0.896f,
103 2.126f, 2.0f, 0.3f, 0.15f,
104 0.999f, 1.2f, 0.89f, 6.1f,
105 };
106
telsoa01c577f2c2018-08-31 09:22:23 +0100107 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000108 std::vector<float> output = std::vector<float>{
109 -1.0f, 0.1f, 0.5f, 1.0f,
110 0.786f, 0.9875f, -1.0f, 0.384f,
111 1.0f, 1.0f, 1.0f, 0.896f,
112 1.0f, 1.0f, 0.3f, 0.15f,
113 0.999f, 1.0f, 0.89f, 1.0f,
114 };
115
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000116 return BoundedReLuTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000117 workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
118 inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000119}
120
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000121LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
122 armnn::IWorkloadFactory& workloadFactory,
123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000124{
125 unsigned int inputWidth = 4u;
126 unsigned int inputHeight = 5u;
127 unsigned int inputChannels = 1u;
128 unsigned int inputBatchSize = 1;
129
130 std::vector<float> input = std::vector<float>{
131 -1.0f, 0.1f, 0.5f, 6.25f,
132 0.786f, 5.9875f, -0.5f, 0.384f,
133 6.0001f, 3.5f, 7.5f, 0.896f,
134 2.126f, 12.0f, 0.3f, 0.15f,
135 0.999f, 1.2f, 0.89f, 6.1f,
136 };
137
David Beckac42efd2018-09-26 17:41:13 +0100138 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000139 std::vector<float> output = std::vector<float>{
140 0.0f, 0.1f, 0.5f, 6.0f,
141 0.786f, 5.9875f, 0.0f, 0.384f,
142 6.0f, 3.5f, 6.0f, 0.896f,
143 2.126f, 6.0f, 0.3f, 0.15f,
144 0.999f, 1.2f, 0.89f, 6.0f,
145 };
146
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000147 return BoundedReLuTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000148 workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
149 inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000150}
151
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000152LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
153 armnn::IWorkloadFactory& workloadFactory,
154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000155{
156 unsigned int inputWidth = 3u;
157 unsigned int inputHeight = 2u;
158 unsigned int inputChannels = 1u;
159 unsigned int inputBatchSize = 1;
160
161 std::vector<uint8_t> input = std::vector<uint8_t>{
162 51, 124, 28,
163 251, 8, 92
164 };
165
David Beckac42efd2018-09-26 17:41:13 +0100166 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000167 std::vector<uint8_t> output = std::vector<uint8_t>{
168 0, 122, 0,
169 255, 0, 58
170 };
171
172 float inputScale = 12.0f / 255.0f;
173 int32_t inputOffset = 63;
174 float outputScale = 6.0f / 255.0f;
175 int32_t outputOffset = 0;
176
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000177 return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
178 workloadFactory, memoryManager, 6.0f, 0.0f,
179 inputScale, inputOffset, outputScale, outputOffset,
180 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000181}
182
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000183LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
184 armnn::IWorkloadFactory& workloadFactory,
185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000186{
187 unsigned int inputWidth = 3u;
188 unsigned int inputHeight = 2u;
189 unsigned int inputChannels = 1u;
190 unsigned int inputBatchSize = 1;
191
192 std::vector<uint8_t> input = std::vector<uint8_t>{
193 51, 230, 28,
194 251, 8, 92
195 };
196
telsoa01c577f2c2018-08-31 09:22:23 +0100197 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000198 std::vector<uint8_t> output = std::vector<uint8_t>{
199 51, 192, 32,
200 192, 32, 92
201 };
202
203 int32_t inputOffset = 112;
204 float inputScale = 0.0125f;
205
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000206 return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
207 workloadFactory, memoryManager, 1.0f, -1.0f,
208 inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
209 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000210}
211
212namespace
213{
214
// Fixed tensor geometry shared by the random-input bounded ReLu tests below,
// so CompareBoundedReLuTest can run two workload factories over identically
// shaped Float32 NCHW tensors.
struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    // Element-wise activation: output dimensions mirror the input.
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
            armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
            armnn::DataType::Float32);
    }
};
239
// Runs an activation workload (parameters taken from activationDescriptor)
// over a deterministic pseudo-random Float32 input and returns the raw output
// tensor. The caller compares outputs produced by different factories.
// NOTE(review): memoryManager is accepted for signature uniformity but is not
// referenced in this function.
boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound] so clamping is actually exercised.
    // The fixed seed (4605828) keeps the input deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload; all activation parameters come from the caller.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}
279
280} // namespace
281
// Cross-checks BoundedReLu between two workload factories (e.g. an accelerated
// backend vs. the reference backend) on the same deterministic random input.
// upperBound/lowerBound feed ActivationDescriptor::m_A/m_B.
// NOTE(review): the random input range is fixed at [0 - 5, 2 * upperBound]
// regardless of lowerBound; the reference factory is run with a null memory
// manager.
LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);

    return result;
}
303
// Runs a Linear activation with identity parameters (a = 1, b = 0) over a
// random tensor; since y = a*x + b = x, the expected output is the input
// itself. qScale/qOffset apply only when T is a quantized type.
// NOTE(review): memoryManager is accepted for signature uniformity but is not
// referenced in this function.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    // Fixed seed (7123561) keeps the random input deterministic across runs.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}
364
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000365LayerTestResult<float, 4> ConstantLinearActivationTest(
366 armnn::IWorkloadFactory& workloadFactory,
367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000368{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000369 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000370}
371
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000372LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
373 armnn::IWorkloadFactory& workloadFactory,
374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000375{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000376 return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
377 workloadFactory, memoryManager, 4.0f, 3);
telsoa014fcda012018-03-09 14:13:49 +0000378}
379
Teresa Charlin18515e22019-04-24 10:17:46 +0100380LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
381 armnn::IWorkloadFactory& workloadFactory,
382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
383{
384 return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
385 workloadFactory, memoryManager, 0.1f, 0);
386}
387
// Generic single-activation test driver: quantizes float inputData with
// (scale, offset), runs the requested activation on a fixed 1x1x1x16 tensor,
// and compares against outputExpectedData quantized with (outScale,
// outOffset). activationParameterA/B feed ActivationDescriptor::m_A/m_B.
// NOTE(review): memoryManager is accepted for signature uniformity but is not
// referenced in this function.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    // Element-wise activation: output dimensions mirror the input.
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload with the caller-supplied function and parameters.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Expected output was calculated by the caller; quantize it the same way.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
                                                                                  outputExpectedData));

    return result;
}
458
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000459template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000460LayerTestResult<T, 4> SimpleSigmoidTestCommon(
461 armnn::IWorkloadFactory& workloadFactory,
462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
463 float qScale,
464 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000465{
466 std::vector<float> inputData = {
467 -0.1f, -0.2f, -0.3f, -0.4f,
468 0.1f, 0.2f, 0.3f, 0.4f,
469 -1.0f, -2.0f, -3.0f, -4.0f,
470 1.0f, 2.0f, 3.0f, 4.0f
471 };
472
telsoa01c577f2c2018-08-31 09:22:23 +0100473 // Calculate output values for input.
telsoa014fcda012018-03-09 14:13:49 +0000474 auto f = [](float value)
475 {
476 return 1.0f / (1.0f + std::exp(-value));
477 };
478 std::vector<float> outputExpectedData(inputData.size());
479 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
480
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000481 return SimpleActivationTest<ArmnnType>(workloadFactory,
482 memoryManager,
483 armnn::ActivationFunction::Sigmoid,
484 0.f,
485 0.f,
486 qScale,
487 qOffset,
488 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100489 1.f / 256.f,
490 0,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000491 outputExpectedData);
telsoa014fcda012018-03-09 14:13:49 +0000492}
493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000494LayerTestResult<float, 4> SimpleSigmoidTest(
495 armnn::IWorkloadFactory& workloadFactory,
496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000497{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000498 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000499}
500
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000501LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
502 armnn::IWorkloadFactory& workloadFactory,
503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000504{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000505 return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
telsoa014fcda012018-03-09 14:13:49 +0000506}
507
Teresa Charlin18515e22019-04-24 10:17:46 +0100508LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
509 armnn::IWorkloadFactory& workloadFactory,
510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
511{
512 return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
513}
514
515template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
516LayerTestResult<T, 4> ReLuTestCommon(
517 armnn::IWorkloadFactory& workloadFactory,
518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
519 float qScale,
520 int32_t qOffset)
521{
522 std::vector<float> inputData = {
523 -0.1f, -0.2f, -0.3f, -0.4f,
524 0.1f, 0.2f, 0.3f, 0.4f,
525 -1.0f, -2.0f, -3.0f, -4.0f,
526 1.0f, 2.0f, 3.0f, 4.0f
527 };
528
529 // Calculate output values for input.
530 auto f = [](float value)
531 {
532 return std::fmax(0.0f, value);
533 };
534 std::vector<float> outputExpectedData(inputData.size());
535 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
536
537 return SimpleActivationTest<ArmnnType>(workloadFactory,
538 memoryManager,
539 armnn::ActivationFunction::ReLu,
540 0.f,
541 0.f,
542 qScale,
543 qOffset,
544 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100545 qScale,
546 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100547 outputExpectedData);
548}
549
550LayerTestResult<int16_t, 4> ReLuInt16Test(
551 armnn::IWorkloadFactory& workloadFactory,
552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
553{
554 return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
555}
556
557
konsof017f6db402019-06-07 15:15:58 +0100558LayerTestResult<uint8_t, 4> ReLuUint8Test(
559 armnn::IWorkloadFactory& workloadFactory,
560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
561{
562 return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 0);
563}
564
565LayerTestResult<float, 4> ReLuTest(
566 armnn::IWorkloadFactory& workloadFactory,
567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
568{
569 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
570}
571
572
Teresa Charlin18515e22019-04-24 10:17:46 +0100573template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
574LayerTestResult<T, 4> BoundedReLuTestCommon(
575 armnn::IWorkloadFactory& workloadFactory,
576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
577 float qScale,
578 int32_t qOffset)
579{
580 std::vector<float> inputData = {
581 -0.1f, -0.2f, -0.3f, -0.4f,
582 0.1f, 0.2f, 0.3f, 0.4f,
583 -1.0f, -2.0f, -3.0f, -4.0f,
584 1.0f, 2.0f, 3.0f, 4.0f
585 };
586 const float a = 1.0f;
587 const float b = -1.0f;
588 // Calculate output values for input.
589 auto f = [a, b](float value)
590 {
591 return std::min(a, std::max(b, value));
592 };
593 std::vector<float> outputExpectedData(inputData.size());
594 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
595
596 return SimpleActivationTest<ArmnnType>(workloadFactory,
597 memoryManager,
598 armnn::ActivationFunction::BoundedReLu,
599 a,
600 b,
601 qScale,
602 qOffset,
603 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100604 qScale,
605 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100606 outputExpectedData);
607}
608
609LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
610 armnn::IWorkloadFactory& workloadFactory,
611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
612{
613 return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
614}
615
616
617
618template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
619LayerTestResult<T, 4> SoftReLuTestCommon(
620 armnn::IWorkloadFactory& workloadFactory,
621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
622 float qScale,
623 int32_t qOffset)
624{
625 std::vector<float> inputData = {
626 -0.1f, -0.2f, -0.3f, -0.4f,
627 0.1f, 0.2f, 0.3f, 0.4f,
628 -1.0f, -2.0f, -3.0f, -4.0f,
629 1.0f, 2.0f, 3.0f, 4.0f
630 };
631
632 // Calculate output values for input.
633 auto f = [](float value)
634 {
635 return std::log(1.0f + std::exp(value));
636 };
637 std::vector<float> outputExpectedData(inputData.size());
638 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
639
640 return SimpleActivationTest<ArmnnType>(workloadFactory,
641 memoryManager,
642 armnn::ActivationFunction::SoftReLu,
643 0.f,
644 0.f,
645 qScale,
646 qOffset,
647 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100648 qScale,
649 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100650 outputExpectedData);
651}
652
konsof017f6db402019-06-07 15:15:58 +0100653LayerTestResult<float, 4> SoftReLuTest(
654 armnn::IWorkloadFactory& workloadFactory,
655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
656{
657 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
658}
659
660LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
661 armnn::IWorkloadFactory& workloadFactory,
662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
663{
664 return SoftReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
665}
666
Teresa Charlin18515e22019-04-24 10:17:46 +0100667LayerTestResult<int16_t, 4> SoftReLuInt16Test(
668 armnn::IWorkloadFactory& workloadFactory,
669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
670{
671 return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
672}
673
674template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
675LayerTestResult<T, 4> LeakyReLuTestCommon(
676 armnn::IWorkloadFactory& workloadFactory,
677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
678 float qScale,
679 int32_t qOffset)
680{
681 std::vector<float> inputData = {
682 -0.1f, -0.2f, -0.3f, -0.4f,
683 0.1f, 0.2f, 0.3f, 0.4f,
684 -1.0f, -2.0f, -3.0f, -4.0f,
685 1.0f, 2.0f, 3.0f, 4.0f
686 };
687
688 const float a = 0.01f;
689 // Calculate output values for input.
690 auto f = [a](float value)
691 {
692 return value > 0.0f ? value : (value * a);
693 };
694 std::vector<float> outputExpectedData(inputData.size());
695 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
696
697 return SimpleActivationTest<ArmnnType>(workloadFactory,
698 memoryManager,
699 armnn::ActivationFunction::LeakyReLu,
700 a,
701 0.f,
702 qScale,
703 qOffset,
704 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100705 qScale,
706 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100707 outputExpectedData);
708}
709
konsof017f6db402019-06-07 15:15:58 +0100710LayerTestResult<float, 4> LeakyReLuTest(
711 armnn::IWorkloadFactory& workloadFactory,
712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
713{
714 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
715}
716
717LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
718 armnn::IWorkloadFactory& workloadFactory,
719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
720{
721 return LeakyReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
722}
723
Teresa Charlin18515e22019-04-24 10:17:46 +0100724LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
725 armnn::IWorkloadFactory& workloadFactory,
726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
727{
728 return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
729}
730
731template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
732LayerTestResult<T, 4> AbsTestCommon(
733 armnn::IWorkloadFactory& workloadFactory,
734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
735 float qScale,
736 int32_t qOffset)
737{
738 std::vector<float> inputData = {
739 -0.1f, -0.2f, -0.3f, -0.4f,
740 0.1f, 0.2f, 0.3f, 0.4f,
741 -1.0f, -2.0f, -3.0f, -4.0f,
742 1.0f, 2.0f, 3.0f, 4.0f
743 };
744
745 // Calculate output values for input.
746 auto f = [](float value)
747 {
748 return std::abs(value);
749 };
750 std::vector<float> outputExpectedData(inputData.size());
751 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
752
753 return SimpleActivationTest<ArmnnType>(workloadFactory,
754 memoryManager,
755 armnn::ActivationFunction::Abs,
756 0.f,
757 0.f,
758 qScale,
759 qOffset,
760 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100761 qScale,
762 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100763 outputExpectedData);
764}
765
konsof017f6db402019-06-07 15:15:58 +0100766LayerTestResult<float, 4> AbsTest(
767 armnn::IWorkloadFactory& workloadFactory,
768 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
769{
770 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
771}
772
773LayerTestResult<uint8_t, 4> AbsUint8Test(
774 armnn::IWorkloadFactory& workloadFactory,
775 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
776{
777 return AbsTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
778}
779
Teresa Charlin18515e22019-04-24 10:17:46 +0100780LayerTestResult<int16_t, 4> AbsInt16Test(
781 armnn::IWorkloadFactory& workloadFactory,
782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
783{
784 return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
785}
786
787template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
788LayerTestResult<T, 4> SqrtTestCommon(
789 armnn::IWorkloadFactory& workloadFactory,
790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
791 float qScale,
792 int32_t qOffset)
793{
794 std::vector<float> inputData = {
795 0.1f, 0.2f, 0.3f, 0.4f,
796 0.1f, 0.2f, 0.3f, 0.4f,
797 1.0f, 2.0f, 3.0f, 4.0f,
798 1.0f, 2.0f, 3.0f, 4.0f
799 };
800
801 // Calculate output values for input.
802 auto f = [](float value)
803 {
804 return std::sqrt(value);
805 };
806 std::vector<float> outputExpectedData(inputData.size());
807 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
808
809 return SimpleActivationTest<ArmnnType>(workloadFactory,
810 memoryManager,
811 armnn::ActivationFunction::Sqrt,
812 0.f,
813 0.f,
814 qScale,
815 qOffset,
816 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100817 qScale,
818 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100819 outputExpectedData);
820}
821
konsof017f6db402019-06-07 15:15:58 +0100822LayerTestResult<float, 4> SqrtTest(
823 armnn::IWorkloadFactory& workloadFactory,
824 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
825{
826 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
827}
828
829LayerTestResult<uint8_t, 4> SqrtUint8Test(
830 armnn::IWorkloadFactory& workloadFactory,
831 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
832{
833 return SqrtTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
834}
835
Teresa Charlin18515e22019-04-24 10:17:46 +0100836LayerTestResult<int16_t, 4> SqrtInt16Test(
837 armnn::IWorkloadFactory& workloadFactory,
838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
839{
840 return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
841}
842
843template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
844LayerTestResult<T, 4> SquareTestCommon(
845 armnn::IWorkloadFactory& workloadFactory,
846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
847 float qScale,
848 int32_t qOffset)
849{
850 std::vector<float> inputData = {
851 -0.1f, -0.2f, -0.3f, -0.4f,
852 0.1f, 0.2f, 0.3f, 0.4f,
853 -1.0f, -2.0f, -3.0f, -4.0f,
854 1.0f, 2.0f, 3.0f, 4.0f
855 };
856
857 // Calculate output values for input.
858 auto f = [](float value)
859 {
860 return std::pow(value,2);
861 };
862 std::vector<float> outputExpectedData(inputData.size());
863 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
864
865 return SimpleActivationTest<ArmnnType>(workloadFactory,
866 memoryManager,
867 armnn::ActivationFunction::Square,
868 0.f,
869 0.f,
870 qScale,
871 qOffset,
872 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100873 qScale,
874 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100875 outputExpectedData);
876}
877
konsof017f6db402019-06-07 15:15:58 +0100878LayerTestResult<float, 4> SquareTest(
879 armnn::IWorkloadFactory& workloadFactory,
880 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
881{
882 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
883}
884
885LayerTestResult<uint8_t, 4> SquareUint8Test(
886 armnn::IWorkloadFactory& workloadFactory,
887 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
888{
889 return SquareTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
890}
891
Teresa Charlin18515e22019-04-24 10:17:46 +0100892LayerTestResult<int16_t, 4> SquareInt16Test(
893 armnn::IWorkloadFactory& workloadFactory,
894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
895{
896 return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
897}
898
899template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
900LayerTestResult<T, 4> TanhTestCommon(
901 armnn::IWorkloadFactory& workloadFactory,
902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
903 float qScale,
904 int32_t qOffset)
905{
906 std::vector<float> inputData = {
907 -0.1f, -0.2f, -0.3f, -0.4f,
908 0.1f, 0.2f, 0.3f, 0.4f,
909 -1.0f, -2.0f, -3.0f, -4.0f,
910 1.0f, 2.0f, 3.0f, 4.0f
911 };
912
913 const float a = 2.0f;
914 const float b = 3.0f;
915 // Calculate output values for input.
916 auto f = [a, b](float value)
917 {
918 return a * tanhf(b * value);
919 };
920 std::vector<float> outputExpectedData(inputData.size());
921 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
922
923 return SimpleActivationTest<ArmnnType>(workloadFactory,
924 memoryManager,
925 armnn::ActivationFunction::TanH,
926 a,
927 b,
928 qScale,
929 qOffset,
930 inputData,
Ferran Balaguerb2b5a262019-06-24 12:43:38 +0100931 qScale,
932 qOffset,
Teresa Charlin18515e22019-04-24 10:17:46 +0100933 outputExpectedData);
934}
935
konsof017f6db402019-06-07 15:15:58 +0100936LayerTestResult<float, 4> TanhTest(
937 armnn::IWorkloadFactory& workloadFactory,
938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
939{
940 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
941}
942
943LayerTestResult<uint8_t, 4> TanhUint8Test(
944 armnn::IWorkloadFactory& workloadFactory,
945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
946{
947 return TanhTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 64);
948}
949
Teresa Charlin18515e22019-04-24 10:17:46 +0100950LayerTestResult<int16_t, 4> TanhInt16Test(
951 armnn::IWorkloadFactory& workloadFactory,
952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
953{
954 return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
955}
956
957
958
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000959template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000960LayerTestResult<T,4> CompareActivationTestImpl(
961 armnn::IWorkloadFactory& workloadFactory,
962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
963 armnn::IWorkloadFactory& refWorkloadFactory,
964 armnn::ActivationFunction f,
965 unsigned int batchSize = 5,
966 float qScale = 0.0f,
967 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000968{
969 unsigned int width = 17;
970 unsigned int height = 29;
971 unsigned int channels = 2;
972
973 float a = 0.234f;
974 float b = -12.345f;
975
976 armnn::TensorInfo inputTensorInfo;
977 armnn::TensorInfo outputTensorInfo;
978
979 unsigned int shape[] = {batchSize, channels, height, width};
980
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000981 inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
982 outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000983
984 // Set quantization parameters if the requested type is a quantized type.
985 if(armnn::IsQuantizedType<T>())
986 {
987 inputTensorInfo.SetQuantizationScale(qScale);
988 inputTensorInfo.SetQuantizationOffset(qOffset);
989 outputTensorInfo.SetQuantizationScale(qScale);
990 outputTensorInfo.SetQuantizationOffset(qOffset);
991 }
992
993 float minVal = -10.f;
994 if (f == armnn::ActivationFunction::Sqrt)
995 {
996 minVal = 0.f;
997 }
998
999 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1000
1001
1002 LayerTestResult<T,4> ret(outputTensorInfo);
1003 auto boostArrayExtents = boost::extents
1004 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
1005 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1006 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
1007 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1008 ret.output.resize(boostArrayExtents);
1009 ret.outputExpected.resize(boostArrayExtents);
1010
1011
1012 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1013 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1014
1015 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1016 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1017
1018 armnn::ActivationQueueDescriptor data;
1019 armnn::WorkloadInfo info;
1020 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1021 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1022 data.m_Parameters.m_A = a;
1023 data.m_Parameters.m_B = b;
1024 data.m_Parameters.m_Function = f;
1025
1026 armnn::ActivationQueueDescriptor refData = data;
1027 armnn::WorkloadInfo refInfo = info;
1028 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1029 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1030
1031 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
1032 BOOST_ASSERT(workload != nullptr);
1033 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
1034 BOOST_ASSERT(workloadRef != nullptr);
1035
1036 inputHandle->Allocate();
1037 outputHandle->Allocate();
1038 inputHandleRef->Allocate();
1039 outputHandleRef->Allocate();
1040
1041 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1042 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1043
1044 workload->Execute();
1045 workloadRef->Execute();
1046
1047 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1048 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1049
1050 return ret;
1051}
1052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001053LayerTestResult<float,4> CompareActivationTest(
1054 armnn::IWorkloadFactory& workloadFactory,
1055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1056 armnn::IWorkloadFactory& refWorkloadFactory,
1057 armnn::ActivationFunction f,
1058 unsigned int batchSize)
telsoa014fcda012018-03-09 14:13:49 +00001059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001060 return CompareActivationTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001061 workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
telsoa014fcda012018-03-09 14:13:49 +00001062}
1063
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001064LayerTestResult<uint8_t,4> CompareActivationUint8Test(
1065 armnn::IWorkloadFactory& workloadFactory,
1066 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1067 armnn::IWorkloadFactory& refWorkloadFactory,
1068 armnn::ActivationFunction f)
telsoa014fcda012018-03-09 14:13:49 +00001069{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001070 return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001071 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
telsoa014fcda012018-03-09 14:13:49 +00001072}
Teresa Charlin18515e22019-04-24 10:17:46 +01001073
1074LayerTestResult<int16_t,4> CompareActivationInt16Test(
1075 armnn::IWorkloadFactory& workloadFactory,
1076 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1077 armnn::IWorkloadFactory& refWorkloadFactory,
1078 armnn::ActivationFunction f)
1079{
1080 return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
1081 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
1082}