//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

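// Shared helper: builds a Float32 input tensor, runs a single Quantize
// workload through the given factory, and returns the actual output alongside
// the expected reference values for comparison.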
template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Bind the input and output tensor handles to the workload descriptor.
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    // Read the quantized result back into the test result structure.
    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}

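// With scale 0.5 and offset 1, quantization computes round(x / 0.5) + 1,
// so the inputs 1.0f..12.0f map to 3..25 in steps of 2.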
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

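// The tiny scale (0.0001) pushes ±100.0f to ±1,000,000 in quantized space,
// far outside the representable range of T, so the quantizer must clamp the
// results to std::numeric_limits<T>::lowest() and std::numeric_limits<T>::max().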
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

} // anonymous namespace

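// Public entry points: instantiate the templated tests for the concrete
// quantized output types exercised by the backends.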
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
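
// Usage note (illustrative, not part of this file): a backend's layer-test
// suite would typically register these entry points with its test-case macro,
// along the lines of
//
//     ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampUint8,  QuantizeClampUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampInt16,  QuantizeClampInt16Test)
//
// The exact registration mechanism is assumed here and depends on the
// backend's test harness.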