//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

// For boost::ignore_unused.
#include <boost/core/ignore_unused.hpp>

namespace
{
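
// Shared implementation for the tests below: runs a Quantize workload built
// by the given factory over inputData and compares the backend's output with
// expectedOutputData.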
template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    boost::ignore_unused(memoryManager);
    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);
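
    // Create backend tensor handles and bind them, via the queue descriptor,
    // as the workload's single input and output.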
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);
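
    // Allocate device memory, upload the float input, run the workload, and
    // read the quantized result back for comparison.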
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}
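
// Quantizes a small NHWC float tensor with scale 0.5 and offset 1, checking
// every element of the result.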
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);
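
    // With scale = 0.5 and offset = 1 each element quantizes as
    // q = round(x / 0.5) + 1, so 1.0f -> 3 and 12.0f -> 25.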
    std::vector<float> inputData = std::vector<float>(
    {
         1.0f,  2.0f,  3.0f,
         4.0f,  5.0f,  6.0f,
         7.0f,  8.0f,  9.0f,
        10.0f, 11.0f, 12.0f,
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
         3,  5,  7,
         9, 11, 13,
        15, 17, 19,
        21, 23, 25,
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}
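
// Feeds values far outside the representable range and checks that the output
// saturates at the numeric limits of the target type.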
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();
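
    // A scale of 0.0001 maps the ±100.0f inputs below to ±1,000,000, which
    // overflows every supported quantized type, so both results must clamp
    // to [min, max].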
    std::vector<float> inputData = std::vector<float>(
    {
        -100.0f, 100.0f
    });

    std::vector<T> expectedOutputData = std::vector<T>(
    {
        min, max
    });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}

} // anonymous namespace
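
// Concrete test entry points, one per quantized output data type.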
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
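
// These entry points are typically registered in a backend's layer-test
// suite. A minimal sketch of such a registration, assuming the
// ARMNN_AUTO_TEST_CASE macro from backendsCommon/test and illustrative case
// names:
//
//     ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampInt16,  QuantizeClampInt16Test)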