//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "ActivationFixture.hpp"
#include "QuantizeHelper.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>
#include <cmath>

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

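    // For BoundedReLu, m_A holds the upper bound and m_B the lower bound,
    // i.e. output = min(m_A, max(m_B, input)).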
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -2.0f, 0.1f, 0.5f, 1.25f,
        0.786f, 0.9875f, -1.5f, 0.384f,
        1.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 2.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually: input clamped to the range [-1.0f, 1.0f].
    std::vector<float> output = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 1.0f,
        0.786f, 0.9875f, -1.0f, 0.384f,
        1.0f, 1.0f, 1.0f, 0.896f,
        1.0f, 1.0f, 0.3f, 0.15f,
        0.999f, 1.0f, 0.89f, 1.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 6.25f,
        0.786f, 5.9875f, -0.5f, 0.384f,
        6.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 12.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually: input clamped to the range [0.0f, 6.0f].
    std::vector<float> output = std::vector<float>{
        0.0f, 0.1f, 0.5f, 6.0f,
        0.786f, 5.9875f, 0.0f, 0.384f,
        6.0f, 3.5f, 6.0f, 0.896f,
        2.126f, 6.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 124, 28,
        251, 8, 92
    };

    // Calculated manually: dequantize, clamp to [0.0f, 6.0f], then requantize with the output parameters.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        0, 122, 0,
        255, 0, 58
    };

    float inputScale = 12.0f / 255.0f;
    int32_t inputOffset = 63;
    float outputScale = 6.0f / 255.0f;
    int32_t outputOffset = 0;
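    // With scale 12/255 and offset 63 the uint8 inputs dequantize to roughly [-3.0f, 9.0f];
    // the output scale 6/255 with offset 0 spans exactly the clamped [0.0f, 6.0f] range,
    // e.g. input 124 -> (124 - 63) * 12/255 = 2.87f -> 2.87f / (6/255) = 122.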

    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 6.0f, 0.0f,
        inputScale, inputOffset, outputScale, outputOffset,
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 230, 28,
        251, 8, 92
    };

    // Calculated manually: dequantize, clamp to [-1.0f, 1.0f], then requantize.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        51, 192, 32,
        192, 32, 92
    };

    int32_t inputOffset = 112;
    float inputScale = 0.0125f;
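    // With scale 0.0125 and offset 112, values already inside the clamp survive unchanged
    // (e.g. 51 -> -0.7625f -> 51), while out-of-range values saturate,
    // e.g. 230 -> (230 - 112) * 0.0125f = 1.475f -> clamp to 1.0f -> 1.0f / 0.0125f + 112 = 192.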

    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 1.0f, -1.0f,
        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

namespace
{

struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
            armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
            armnn::DataType::Float32);
    }
};

boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound].
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}

} // namespace

LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

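    // BoundedReLuRandomInputTest uses a fixed random seed, so the backend under test and
    // the reference workload factory receive identical input data.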
    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
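    // Linear activation computes a * x + b, so a = 1 and b = 0 is the identity function.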

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}

LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 4.0f, 3);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float qScale,
    int32_t qOffset,
    const std::vector<float>& inputData,
    const std::vector<float>& outputExpectedData)
{
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation descriptor.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Quantize the manually calculated expected data supplied by the caller.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sigmoid,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
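    // With qScale 0.1 and qOffset 50, the sigmoid output range (0, 1) maps onto
    // quantized values between 50 and 60.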
    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    float a = 0.234f;
    float b = -12.345f;
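    // Arbitrary activation parameters; they are only read by functions that take
    // parameters (m_A/m_B), e.g. Linear, BoundedReLu, TanH and LeakyReLu.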

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    float minVal = -10.f;
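    // Sqrt is undefined for negative inputs, so restrict the random range to [0, 10] in that case.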
    if (f == armnn::ActivationFunction::Sqrt)
    {
        minVal = 0.f;
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);

    LayerTestResult<T,4> ret(outputTensorInfo);
    auto boostArrayExtents = boost::extents
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    BOOST_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    BOOST_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

LayerTestResult<float,4> CompareActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize)
{
    return CompareActivationTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
}

LayerTestResult<uint8_t,4> CompareActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}
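
// A minimal usage sketch, kept as a comment because the surrounding test framework is not
// shown here: a backend test would pass its own concrete workload factory (written below as
// the hypothetical "backendFactory") together with a reference factory, e.g.
// armnn::RefWorkloadFactory, and then compare result.output against result.outputExpected
// element-wise.
//
//     armnn::RefWorkloadFactory refFactory;
//     LayerTestResult<float, 4> result =
//         CompareActivationTest(backendFactory, nullptr, refFactory,
//                               armnn::ActivationFunction::ReLu, 5);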