blob: 9088d18858a153cc0011cb01db239dd3c4fba389 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007#include "ActivationFixture.hpp"
8#include "QuantizeHelper.hpp"
9
telsoa014fcda012018-03-09 14:13:49 +000010#include <armnn/ArmNN.hpp>
11#include <armnn/Tensor.hpp>
12#include <armnn/TypesUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000013
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000014#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000015#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000016#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000017
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000018#include <test/TensorHelpers.hpp>
telsoa014fcda012018-03-09 14:13:49 +000019
20#include <algorithm>
21
// Runs a bounded ReLu activation workload over a 4D NCHW tensor and compares
// the result against caller-supplied expected data.
//
// upperBound/lowerBound map to ActivationDescriptor::m_A/m_B respectively.
// The quantization parameters are applied only for quantized T; for float
// types they are ignored (guarded by IsQuantizedType<T>() below).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    // Bounded ReLu is elementwise, so output dimensions mirror the input.
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Quantization parameters only make sense for quantized data types.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Setup bounded ReLu: m_A is the upper bound, m_B the lower bound.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Expected data is supplied by the caller (calculated manually per test).
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}
89
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000090LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
91 armnn::IWorkloadFactory& workloadFactory,
92 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +000093{
94 unsigned int inputWidth = 4u;
95 unsigned int inputHeight = 5u;
96 unsigned int inputChannels = 1u;
97 unsigned int inputBatchSize = 1;
98
99 std::vector<float> input = std::vector<float>{
100 -2.0f, 0.1f, 0.5f, 1.25f,
101 0.786f, 0.9875f, -1.5f, 0.384f,
102 1.0001f, 3.5f, 7.5f, 0.896f,
103 2.126f, 2.0f, 0.3f, 0.15f,
104 0.999f, 1.2f, 0.89f, 6.1f,
105 };
106
telsoa01c577f2c2018-08-31 09:22:23 +0100107 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000108 std::vector<float> output = std::vector<float>{
109 -1.0f, 0.1f, 0.5f, 1.0f,
110 0.786f, 0.9875f, -1.0f, 0.384f,
111 1.0f, 1.0f, 1.0f, 0.896f,
112 1.0f, 1.0f, 0.3f, 0.15f,
113 0.999f, 1.0f, 0.89f, 1.0f,
114 };
115
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000116 return BoundedReLuTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000117 workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
118 inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000119}
120
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000121LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
122 armnn::IWorkloadFactory& workloadFactory,
123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000124{
125 unsigned int inputWidth = 4u;
126 unsigned int inputHeight = 5u;
127 unsigned int inputChannels = 1u;
128 unsigned int inputBatchSize = 1;
129
130 std::vector<float> input = std::vector<float>{
131 -1.0f, 0.1f, 0.5f, 6.25f,
132 0.786f, 5.9875f, -0.5f, 0.384f,
133 6.0001f, 3.5f, 7.5f, 0.896f,
134 2.126f, 12.0f, 0.3f, 0.15f,
135 0.999f, 1.2f, 0.89f, 6.1f,
136 };
137
David Beckac42efd2018-09-26 17:41:13 +0100138 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000139 std::vector<float> output = std::vector<float>{
140 0.0f, 0.1f, 0.5f, 6.0f,
141 0.786f, 5.9875f, 0.0f, 0.384f,
142 6.0f, 3.5f, 6.0f, 0.896f,
143 2.126f, 6.0f, 0.3f, 0.15f,
144 0.999f, 1.2f, 0.89f, 6.0f,
145 };
146
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000147 return BoundedReLuTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000148 workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
149 inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000150}
151
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000152LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
153 armnn::IWorkloadFactory& workloadFactory,
154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000155{
156 unsigned int inputWidth = 3u;
157 unsigned int inputHeight = 2u;
158 unsigned int inputChannels = 1u;
159 unsigned int inputBatchSize = 1;
160
161 std::vector<uint8_t> input = std::vector<uint8_t>{
162 51, 124, 28,
163 251, 8, 92
164 };
165
David Beckac42efd2018-09-26 17:41:13 +0100166 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000167 std::vector<uint8_t> output = std::vector<uint8_t>{
168 0, 122, 0,
169 255, 0, 58
170 };
171
172 float inputScale = 12.0f / 255.0f;
173 int32_t inputOffset = 63;
174 float outputScale = 6.0f / 255.0f;
175 int32_t outputOffset = 0;
176
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000177 return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
178 workloadFactory, memoryManager, 6.0f, 0.0f,
179 inputScale, inputOffset, outputScale, outputOffset,
180 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000181}
182
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000183LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
184 armnn::IWorkloadFactory& workloadFactory,
185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000186{
187 unsigned int inputWidth = 3u;
188 unsigned int inputHeight = 2u;
189 unsigned int inputChannels = 1u;
190 unsigned int inputBatchSize = 1;
191
192 std::vector<uint8_t> input = std::vector<uint8_t>{
193 51, 230, 28,
194 251, 8, 92
195 };
196
telsoa01c577f2c2018-08-31 09:22:23 +0100197 // Calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000198 std::vector<uint8_t> output = std::vector<uint8_t>{
199 51, 192, 32,
200 192, 32, 92
201 };
202
203 int32_t inputOffset = 112;
204 float inputScale = 0.0125f;
205
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000206 return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
207 workloadFactory, memoryManager, 1.0f, -1.0f,
208 inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
209 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
telsoa014fcda012018-03-09 14:13:49 +0000210}
211
212namespace
213{
214
// Fixed tensor dimensions shared by the random-input bounded ReLu tests, so
// that both runs in CompareBoundedReLuTest use identical Float32 NCHW shapes.
struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    // Elementwise activation: output shape equals input shape.
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
            armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
            armnn::DataType::Float32);
    }
};
239
// Executes an activation workload (configured by activationDescriptor) over a
// deterministic pseudo-random input tensor and returns the raw output data.
// lowerBound/upperBound only shape the random input range here; the actual
// activation parameters come from activationDescriptor.
// NOTE(review): memoryManager is unused in this body — CompareBoundedReLuTest
// passes nullptr for the reference run, so do not dereference it here.
boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound] so clamping is actually exercised.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}
279
280} // namespace
281
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000282LayerTestResult<float, 4> CompareBoundedReLuTest(
283 armnn::IWorkloadFactory& workloadFactory,
284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
285 armnn::IWorkloadFactory& refWorkloadFactory,
286 float upperBound,
287 float lowerBound)
telsoa014fcda012018-03-09 14:13:49 +0000288{
289 LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
290
291 armnn::ActivationDescriptor activationDescriptor;
292 activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
293 activationDescriptor.m_A = upperBound;
294 activationDescriptor.m_B = lowerBound;
295
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000296 result.output = BoundedReLuRandomInputTest(
297 workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
298 result.outputExpected = BoundedReLuRandomInputTest(
299 refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);
telsoa014fcda012018-03-09 14:13:49 +0000300
301 return result;
302}
303
// Runs a Linear activation with a = 1, b = 0 (the identity function) over a
// random tensor and expects the output to equal the input unchanged.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // NCHW layout.
    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged (y = 1*x + 0).
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    // Fixed seed keeps the random input deterministic across runs.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}
364
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000365LayerTestResult<float, 4> ConstantLinearActivationTest(
366 armnn::IWorkloadFactory& workloadFactory,
367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000368{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000369 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000370}
371
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000372LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
373 armnn::IWorkloadFactory& workloadFactory,
374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000375{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000376 return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
377 workloadFactory, memoryManager, 4.0f, 3);
telsoa014fcda012018-03-09 14:13:49 +0000378}
379
Teresa Charlin18515e22019-04-24 10:17:46 +0100380LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
381 armnn::IWorkloadFactory& workloadFactory,
382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
383{
384 return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
385 workloadFactory, memoryManager, 0.1f, 0);
386}
387
// Generic single-activation test harness over a fixed 1x1x1x16 tensor.
// Quantizes inputData/outputExpectedData with (qScale, qOffset) for quantized
// T; activationParameterA/B map to ActivationDescriptor::m_A/m_B.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float qScale,
    int32_t qOffset,
    const std::vector<float>& inputData,
    const std::vector<float>& outputExpectedData)
{
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    // Elementwise activation: output shape equals input shape.
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload (function and parameters from the caller).
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Expected data was calculated by the caller (manually or via a reference lambda).
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));

    return result;
}
455
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000456template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000457LayerTestResult<T, 4> SimpleSigmoidTestCommon(
458 armnn::IWorkloadFactory& workloadFactory,
459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
460 float qScale,
461 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000462{
463 std::vector<float> inputData = {
464 -0.1f, -0.2f, -0.3f, -0.4f,
465 0.1f, 0.2f, 0.3f, 0.4f,
466 -1.0f, -2.0f, -3.0f, -4.0f,
467 1.0f, 2.0f, 3.0f, 4.0f
468 };
469
telsoa01c577f2c2018-08-31 09:22:23 +0100470 // Calculate output values for input.
telsoa014fcda012018-03-09 14:13:49 +0000471 auto f = [](float value)
472 {
473 return 1.0f / (1.0f + std::exp(-value));
474 };
475 std::vector<float> outputExpectedData(inputData.size());
476 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
477
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000478 return SimpleActivationTest<ArmnnType>(workloadFactory,
479 memoryManager,
480 armnn::ActivationFunction::Sigmoid,
481 0.f,
482 0.f,
483 qScale,
484 qOffset,
485 inputData,
486 outputExpectedData);
telsoa014fcda012018-03-09 14:13:49 +0000487}
488
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000489LayerTestResult<float, 4> SimpleSigmoidTest(
490 armnn::IWorkloadFactory& workloadFactory,
491 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000492{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000493 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000494}
495
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000496LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
497 armnn::IWorkloadFactory& workloadFactory,
498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000499{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000500 return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
telsoa014fcda012018-03-09 14:13:49 +0000501}
502
Teresa Charlin18515e22019-04-24 10:17:46 +0100503LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
504 armnn::IWorkloadFactory& workloadFactory,
505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
506{
507 return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
508}
509
510template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
511LayerTestResult<T, 4> ReLuTestCommon(
512 armnn::IWorkloadFactory& workloadFactory,
513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
514 float qScale,
515 int32_t qOffset)
516{
517 std::vector<float> inputData = {
518 -0.1f, -0.2f, -0.3f, -0.4f,
519 0.1f, 0.2f, 0.3f, 0.4f,
520 -1.0f, -2.0f, -3.0f, -4.0f,
521 1.0f, 2.0f, 3.0f, 4.0f
522 };
523
524 // Calculate output values for input.
525 auto f = [](float value)
526 {
527 return std::fmax(0.0f, value);
528 };
529 std::vector<float> outputExpectedData(inputData.size());
530 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
531
532 return SimpleActivationTest<ArmnnType>(workloadFactory,
533 memoryManager,
534 armnn::ActivationFunction::ReLu,
535 0.f,
536 0.f,
537 qScale,
538 qOffset,
539 inputData,
540 outputExpectedData);
541}
542
543LayerTestResult<int16_t, 4> ReLuInt16Test(
544 armnn::IWorkloadFactory& workloadFactory,
545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
546{
547 return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
548}
549
550
konsof017f6db402019-06-07 15:15:58 +0100551LayerTestResult<uint8_t, 4> ReLuUint8Test(
552 armnn::IWorkloadFactory& workloadFactory,
553 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
554{
555 return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 0);
556}
557
558LayerTestResult<float, 4> ReLuTest(
559 armnn::IWorkloadFactory& workloadFactory,
560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
561{
562 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
563}
564
565
Teresa Charlin18515e22019-04-24 10:17:46 +0100566template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
567LayerTestResult<T, 4> BoundedReLuTestCommon(
568 armnn::IWorkloadFactory& workloadFactory,
569 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
570 float qScale,
571 int32_t qOffset)
572{
573 std::vector<float> inputData = {
574 -0.1f, -0.2f, -0.3f, -0.4f,
575 0.1f, 0.2f, 0.3f, 0.4f,
576 -1.0f, -2.0f, -3.0f, -4.0f,
577 1.0f, 2.0f, 3.0f, 4.0f
578 };
579 const float a = 1.0f;
580 const float b = -1.0f;
581 // Calculate output values for input.
582 auto f = [a, b](float value)
583 {
584 return std::min(a, std::max(b, value));
585 };
586 std::vector<float> outputExpectedData(inputData.size());
587 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
588
589 return SimpleActivationTest<ArmnnType>(workloadFactory,
590 memoryManager,
591 armnn::ActivationFunction::BoundedReLu,
592 a,
593 b,
594 qScale,
595 qOffset,
596 inputData,
597 outputExpectedData);
598}
599
600LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
601 armnn::IWorkloadFactory& workloadFactory,
602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
603{
604 return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
605}
606
607
608
609template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
610LayerTestResult<T, 4> SoftReLuTestCommon(
611 armnn::IWorkloadFactory& workloadFactory,
612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
613 float qScale,
614 int32_t qOffset)
615{
616 std::vector<float> inputData = {
617 -0.1f, -0.2f, -0.3f, -0.4f,
618 0.1f, 0.2f, 0.3f, 0.4f,
619 -1.0f, -2.0f, -3.0f, -4.0f,
620 1.0f, 2.0f, 3.0f, 4.0f
621 };
622
623 // Calculate output values for input.
624 auto f = [](float value)
625 {
626 return std::log(1.0f + std::exp(value));
627 };
628 std::vector<float> outputExpectedData(inputData.size());
629 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
630
631 return SimpleActivationTest<ArmnnType>(workloadFactory,
632 memoryManager,
633 armnn::ActivationFunction::SoftReLu,
634 0.f,
635 0.f,
636 qScale,
637 qOffset,
638 inputData,
639 outputExpectedData);
640}
641
konsof017f6db402019-06-07 15:15:58 +0100642LayerTestResult<float, 4> SoftReLuTest(
643 armnn::IWorkloadFactory& workloadFactory,
644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
645{
646 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
647}
648
649LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
650 armnn::IWorkloadFactory& workloadFactory,
651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
652{
653 return SoftReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
654}
655
Teresa Charlin18515e22019-04-24 10:17:46 +0100656LayerTestResult<int16_t, 4> SoftReLuInt16Test(
657 armnn::IWorkloadFactory& workloadFactory,
658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
659{
660 return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
661}
662
663template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
664LayerTestResult<T, 4> LeakyReLuTestCommon(
665 armnn::IWorkloadFactory& workloadFactory,
666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
667 float qScale,
668 int32_t qOffset)
669{
670 std::vector<float> inputData = {
671 -0.1f, -0.2f, -0.3f, -0.4f,
672 0.1f, 0.2f, 0.3f, 0.4f,
673 -1.0f, -2.0f, -3.0f, -4.0f,
674 1.0f, 2.0f, 3.0f, 4.0f
675 };
676
677 const float a = 0.01f;
678 // Calculate output values for input.
679 auto f = [a](float value)
680 {
681 return value > 0.0f ? value : (value * a);
682 };
683 std::vector<float> outputExpectedData(inputData.size());
684 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
685
686 return SimpleActivationTest<ArmnnType>(workloadFactory,
687 memoryManager,
688 armnn::ActivationFunction::LeakyReLu,
689 a,
690 0.f,
691 qScale,
692 qOffset,
693 inputData,
694 outputExpectedData);
695}
696
konsof017f6db402019-06-07 15:15:58 +0100697LayerTestResult<float, 4> LeakyReLuTest(
698 armnn::IWorkloadFactory& workloadFactory,
699 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
700{
701 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
702}
703
704LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
705 armnn::IWorkloadFactory& workloadFactory,
706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
707{
708 return LeakyReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
709}
710
Teresa Charlin18515e22019-04-24 10:17:46 +0100711LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
712 armnn::IWorkloadFactory& workloadFactory,
713 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
714{
715 return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
716}
717
718template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
719LayerTestResult<T, 4> AbsTestCommon(
720 armnn::IWorkloadFactory& workloadFactory,
721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
722 float qScale,
723 int32_t qOffset)
724{
725 std::vector<float> inputData = {
726 -0.1f, -0.2f, -0.3f, -0.4f,
727 0.1f, 0.2f, 0.3f, 0.4f,
728 -1.0f, -2.0f, -3.0f, -4.0f,
729 1.0f, 2.0f, 3.0f, 4.0f
730 };
731
732 // Calculate output values for input.
733 auto f = [](float value)
734 {
735 return std::abs(value);
736 };
737 std::vector<float> outputExpectedData(inputData.size());
738 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
739
740 return SimpleActivationTest<ArmnnType>(workloadFactory,
741 memoryManager,
742 armnn::ActivationFunction::Abs,
743 0.f,
744 0.f,
745 qScale,
746 qOffset,
747 inputData,
748 outputExpectedData);
749}
750
konsof017f6db402019-06-07 15:15:58 +0100751LayerTestResult<float, 4> AbsTest(
752 armnn::IWorkloadFactory& workloadFactory,
753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
754{
755 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
756}
757
758LayerTestResult<uint8_t, 4> AbsUint8Test(
759 armnn::IWorkloadFactory& workloadFactory,
760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
761{
762 return AbsTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
763}
764
Teresa Charlin18515e22019-04-24 10:17:46 +0100765LayerTestResult<int16_t, 4> AbsInt16Test(
766 armnn::IWorkloadFactory& workloadFactory,
767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
768{
769 return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
770}
771
772template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
773LayerTestResult<T, 4> SqrtTestCommon(
774 armnn::IWorkloadFactory& workloadFactory,
775 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
776 float qScale,
777 int32_t qOffset)
778{
779 std::vector<float> inputData = {
780 0.1f, 0.2f, 0.3f, 0.4f,
781 0.1f, 0.2f, 0.3f, 0.4f,
782 1.0f, 2.0f, 3.0f, 4.0f,
783 1.0f, 2.0f, 3.0f, 4.0f
784 };
785
786 // Calculate output values for input.
787 auto f = [](float value)
788 {
789 return std::sqrt(value);
790 };
791 std::vector<float> outputExpectedData(inputData.size());
792 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
793
794 return SimpleActivationTest<ArmnnType>(workloadFactory,
795 memoryManager,
796 armnn::ActivationFunction::Sqrt,
797 0.f,
798 0.f,
799 qScale,
800 qOffset,
801 inputData,
802 outputExpectedData);
803}
804
konsof017f6db402019-06-07 15:15:58 +0100805LayerTestResult<float, 4> SqrtTest(
806 armnn::IWorkloadFactory& workloadFactory,
807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
808{
809 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
810}
811
812LayerTestResult<uint8_t, 4> SqrtUint8Test(
813 armnn::IWorkloadFactory& workloadFactory,
814 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
815{
816 return SqrtTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
817}
818
Teresa Charlin18515e22019-04-24 10:17:46 +0100819LayerTestResult<int16_t, 4> SqrtInt16Test(
820 armnn::IWorkloadFactory& workloadFactory,
821 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
822{
823 return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
824}
825
826template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
827LayerTestResult<T, 4> SquareTestCommon(
828 armnn::IWorkloadFactory& workloadFactory,
829 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
830 float qScale,
831 int32_t qOffset)
832{
833 std::vector<float> inputData = {
834 -0.1f, -0.2f, -0.3f, -0.4f,
835 0.1f, 0.2f, 0.3f, 0.4f,
836 -1.0f, -2.0f, -3.0f, -4.0f,
837 1.0f, 2.0f, 3.0f, 4.0f
838 };
839
840 // Calculate output values for input.
841 auto f = [](float value)
842 {
843 return std::pow(value,2);
844 };
845 std::vector<float> outputExpectedData(inputData.size());
846 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
847
848 return SimpleActivationTest<ArmnnType>(workloadFactory,
849 memoryManager,
850 armnn::ActivationFunction::Square,
851 0.f,
852 0.f,
853 qScale,
854 qOffset,
855 inputData,
856 outputExpectedData);
857}
858
konsof017f6db402019-06-07 15:15:58 +0100859LayerTestResult<float, 4> SquareTest(
860 armnn::IWorkloadFactory& workloadFactory,
861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
862{
863 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
864}
865
866LayerTestResult<uint8_t, 4> SquareUint8Test(
867 armnn::IWorkloadFactory& workloadFactory,
868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
869{
870 return SquareTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
871}
872
Teresa Charlin18515e22019-04-24 10:17:46 +0100873LayerTestResult<int16_t, 4> SquareInt16Test(
874 armnn::IWorkloadFactory& workloadFactory,
875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
876{
877 return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
878}
879
880template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
881LayerTestResult<T, 4> TanhTestCommon(
882 armnn::IWorkloadFactory& workloadFactory,
883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
884 float qScale,
885 int32_t qOffset)
886{
887 std::vector<float> inputData = {
888 -0.1f, -0.2f, -0.3f, -0.4f,
889 0.1f, 0.2f, 0.3f, 0.4f,
890 -1.0f, -2.0f, -3.0f, -4.0f,
891 1.0f, 2.0f, 3.0f, 4.0f
892 };
893
894 const float a = 2.0f;
895 const float b = 3.0f;
896 // Calculate output values for input.
897 auto f = [a, b](float value)
898 {
899 return a * tanhf(b * value);
900 };
901 std::vector<float> outputExpectedData(inputData.size());
902 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
903
904 return SimpleActivationTest<ArmnnType>(workloadFactory,
905 memoryManager,
906 armnn::ActivationFunction::TanH,
907 a,
908 b,
909 qScale,
910 qOffset,
911 inputData,
912 outputExpectedData);
913}
914
konsof017f6db402019-06-07 15:15:58 +0100915LayerTestResult<float, 4> TanhTest(
916 armnn::IWorkloadFactory& workloadFactory,
917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
918{
919 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
920}
921
922LayerTestResult<uint8_t, 4> TanhUint8Test(
923 armnn::IWorkloadFactory& workloadFactory,
924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
925{
926 return TanhTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 64);
927}
928
Teresa Charlin18515e22019-04-24 10:17:46 +0100929LayerTestResult<int16_t, 4> TanhInt16Test(
930 armnn::IWorkloadFactory& workloadFactory,
931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
932{
933 return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
934}
935
936
937
// Runs the same activation layer on two backends (workloadFactory vs
// refWorkloadFactory) over a random input tensor, and returns a
// LayerTestResult whose 'output' holds the backend-under-test result and
// 'outputExpected' holds the reference backend's result, for comparison by
// the caller.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other tests in this file but is not referenced in this function body.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    // Fixed spatial dimensions; only the batch size is caller-controlled.
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    // Arbitrary activation parameters (meaning depends on the function f).
    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // NCHW layout: activation output has the same shape as its input.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Sqrt is undefined for negative inputs, so restrict the random range to
    // non-negative values for that function only.
    float minVal = -10.f;
    if (f == armnn::ActivationFunction::Sqrt)
    {
        minVal = 0.f;
    }

    // Fixed seed (21453) keeps the random input — and therefore the test —
    // deterministic across runs.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);


    LayerTestResult<T,4> ret(outputTensorInfo);
    auto boostArrayExtents = boost::extents
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);


    // One input/output handle pair per backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    // Clone the descriptor for the reference backend, then rebind slot 0 of its
    // inputs/outputs to the reference backend's tensor handles.
    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    BOOST_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    BOOST_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Feed the identical input tensor to both backends.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // output <- backend under test; outputExpected <- reference backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1031
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001032LayerTestResult<float,4> CompareActivationTest(
1033 armnn::IWorkloadFactory& workloadFactory,
1034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1035 armnn::IWorkloadFactory& refWorkloadFactory,
1036 armnn::ActivationFunction f,
1037 unsigned int batchSize)
telsoa014fcda012018-03-09 14:13:49 +00001038{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001039 return CompareActivationTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001040 workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
telsoa014fcda012018-03-09 14:13:49 +00001041}
1042
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001043LayerTestResult<uint8_t,4> CompareActivationUint8Test(
1044 armnn::IWorkloadFactory& workloadFactory,
1045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1046 armnn::IWorkloadFactory& refWorkloadFactory,
1047 armnn::ActivationFunction f)
telsoa014fcda012018-03-09 14:13:49 +00001048{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001049 return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001050 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
telsoa014fcda012018-03-09 14:13:49 +00001051}
Teresa Charlin18515e22019-04-24 10:17:46 +01001052
1053LayerTestResult<int16_t,4> CompareActivationInt16Test(
1054 armnn::IWorkloadFactory& workloadFactory,
1055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1056 armnn::IWorkloadFactory& refWorkloadFactory,
1057 armnn::ActivationFunction f)
1058{
1059 return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
1060 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
1061}