blob: ae28bc03f33c61f13a61b2e58a54257be5f4d4e3 [file] [log] [blame]
Aron Virginas-Tar8168f402019-10-04 13:10:16 +01001//
2// Copyright © 2019 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "InstanceNormalizationTestImpl.hpp"
7
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01008#include <QuantizeHelper.hpp>
Aron Virginas-Tar8168f402019-10-04 13:10:16 +01009#include <ResolveType.hpp>
10
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010011
12#include <backendsCommon/CpuTensorHandle.hpp>
Matteo Martincighe5b8eb92019-11-28 15:45:42 +000013#include <armnn/backends/IBackendInternal.hpp>
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010014#include <backendsCommon/WorkloadFactory.hpp>
15
16#include <backendsCommon/test/DataLayoutUtils.hpp>
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010017#include <backendsCommon/test/TensorCopyUtils.hpp>
18#include <backendsCommon/test/WorkloadTestUtils.hpp>
19
20#include <test/TensorHelpers.hpp>
21
22namespace
23{
24
25template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
26LayerTestResult<T, 4> InstanceNormTestImpl(
27 armnn::IWorkloadFactory& workloadFactory,
28 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
29 const armnn::TensorInfo& inputTensorInfo,
30 const armnn::TensorInfo& outputTensorInfo,
31 const std::vector<float>& inputValues,
32 const std::vector<float>& expectedOutputValues,
33 armnn::InstanceNormalizationQueueDescriptor descriptor,
34 float qScale = 0.0f,
35 int32_t qOffset = 0)
36{
Derek Lambertic374ff02019-12-10 21:57:35 +000037 boost::ignore_unused(memoryManager);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010038 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
39 armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010040
41 LayerTestResult<T, 4> result(outputTensorInfo);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010042 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
43 armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010044
45 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
46 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
47
48 armnn::WorkloadInfo info;
49
50
51 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
52 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
53
54 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
55
56 inputHandle->Allocate();
57 outputHandle->Allocate();
58
59 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
60
61 workload->Execute();
62
63 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
64
65 return result;
66}
67
68template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
69LayerTestResult<T, 4> InstanceNormTest(
70 armnn::IWorkloadFactory& workloadFactory,
71 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
72 armnn::DataLayout dataLayout)
73{
74 // BatchSize: 2
75 // Height: 2
76 // Width: 2
77 // Channels: 2
78
79 const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
80
81 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
82 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
83
84 std::vector<float> inputValues
85 {
86 // Batch 0, Height 0, Width 0 x Channel (2)
87 0.f, 1.f,
88 // Batch 0, Height 0, Width 1 x Channel (2)
89 0.f, 2.f,
90
91 // Batch 0, Height 1, Width 0 x Channel (2)
92 0.f, 2.f,
93 // Batch 0, Height 1, Width 1 x Channel (2)
94 0.f, 4.f,
95
96 // Batch 1, Height 0, Width 0 x Channel (2)
97 1.f, -1.f,
98 // Batch 1, Height 0, Width 1 x Channel (2)
99 -1.f, 2.f,
100
101 // Batch 1, Height 1, Width 0 x Channel (2)
102 -1.f, -2.f,
103 // Batch 1, Height 1, Width 1 x Channel (2)
104 1.f, 4.f
105 };
106
107 std::vector<float> expectedOutputValues
108 {
109 // Batch 0, Height 0, Width 0 x Channel (2)
110 0.f, -1.1470304f,
111 // Batch 0, Height 0, Width 1 x Channel (2)
112 0.f, -0.22940612f,
113 // Batch 0, Height 1, Width 0 x Channel (2)
114 0.f, -0.22940612f,
115 // Batch 0, Height 1, Width 1 x Channel (2)
116 0.f, 1.6058424f,
117
118 // Batch 1, Height 0, Width 0 x Channel (2)
119 0.99995005f, -0.7337929f,
120 // Batch 1, Height 0, Width 1 x Channel (2)
121 -0.99995005f, 0.52413774f,
122
123 // Batch 1, Height 1, Width 0 x Channel (2)
124 -0.99995005f, -1.1531031f,
125 // Batch 1, Height 1, Width 1 x Channel (2)
126 0.99995005f, 1.3627582f
127 };
128
129 if (dataLayout == armnn::DataLayout::NCHW)
130 {
131 PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
132 PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
133 }
134
135 armnn::InstanceNormalizationQueueDescriptor descriptor;
136 descriptor.m_Parameters.m_Eps = 0.0001f;
137 descriptor.m_Parameters.m_Beta = 0.0f;
138 descriptor.m_Parameters.m_Gamma = 1.0f;
139 descriptor.m_Parameters.m_DataLayout = dataLayout;
140
141 return InstanceNormTestImpl<ArmnnType>(
142 workloadFactory,
143 memoryManager,
144 inputTensorInfo,
145 outputTensorInfo,
146 inputValues,
147 expectedOutputValues,
148 descriptor);
149}
150
151template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
152LayerTestResult<T, 4> InstanceNormTest2(
153 armnn::IWorkloadFactory& workloadFactory,
154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
155 armnn::DataLayout dataLayout)
156{
157 // BatchSize: 2
158 // Height: 2
159 // Width: 2
160 // Channels: 2
161
162 const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
163
164 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
165 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
166
167 std::vector<float> inputValues
168 {
169 // Batch 0, Height 0, Width 0 x Channel (2)
170 0.f, 1.f,
171 // Batch 0, Height 0, Width 1 x Channel (2)
172 0.f, 2.f,
173
174 // Batch 0, Height 1, Width 0 x Channel (2)
175 0.f, 2.f,
176 // Batch 0, Height 1, Width 1 x Channel (2)
177 0.f, 4.f,
178
179 // Batch 1, Height 0, Width 0 x Channel (2)
180 1.f, -1.f,
181 // Batch 1, Height 0, Width 1 x Channel (2)
182 -1.f, 2.f,
183
184 // Batch 1, Height 1, Width 0 x Channel (2)
185 -1.f, -2.f,
186 // Batch 1, Height 1, Width 1 x Channel (2)
187 1.f, 4.f
188 };
189
190 std::vector<float> expectedOutputValues
191 {
192 // Batch 0, Height 0, Width 0 x Channel (2)
193 10.f, 7.7059393f,
194 // Batch 0, Height 0, Width 1 x Channel (2)
195 10.f, 9.541187f,
196
197 // Batch 0, Height 1, Width 0 x Channel (2)
198 10.f, 9.541187f,
199 // Batch 0, Height 1, Width 1 x Channel (2)
200 10.f, 13.211685f,
201
202 // Batch 1, Height 0, Width 0 x Channel (2)
203 11.9999f, 8.532414f,
204 // Batch 1, Height 0, Width 1 x Channel (2)
205 8.0001f, 11.048275f,
206
207 // Batch 1, Height 1, Width 0 x Channel (2)
208 8.0001f, 7.693794f,
209 // Batch 1, Height 1, Width 1 x Channel (2)
210 11.9999f, 12.725516f
211 };
212
213 if (dataLayout == armnn::DataLayout::NCHW)
214 {
215 PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
216 PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
217 }
218
219 armnn::InstanceNormalizationQueueDescriptor descriptor;
220 descriptor.m_Parameters.m_Eps = 0.0001f;
221 descriptor.m_Parameters.m_Beta = 10.0f;
222 descriptor.m_Parameters.m_Gamma = 2.0f;
223 descriptor.m_Parameters.m_DataLayout = dataLayout;
224
225 return InstanceNormTestImpl<ArmnnType>(
226 workloadFactory,
227 memoryManager,
228 inputTensorInfo,
229 outputTensorInfo,
230 inputValues,
231 expectedOutputValues,
232 descriptor);
233}
234
235} // anonymous namespace
236
// Runs the identity-affine (gamma = 1, beta = 0) instance-norm reference test
// with Float32 data in the given data layout.
LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
244
// Runs the identity-affine (gamma = 1, beta = 0) instance-norm reference test
// with Float16 data in the given data layout.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}
252
// Runs the scaled/shifted (gamma = 2, beta = 10) instance-norm reference test
// with Float32 data in the given data layout.
LayerTestResult<float, 4> InstanceNormFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
260
// Runs the scaled/shifted (gamma = 2, beta = 10) instance-norm reference test
// with Float16 data in the given data layout.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}