//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "InstanceNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

26template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
27LayerTestResult<T, 4> InstanceNormTestImpl(
28 armnn::IWorkloadFactory& workloadFactory,
29 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
30 const armnn::TensorInfo& inputTensorInfo,
31 const armnn::TensorInfo& outputTensorInfo,
32 const std::vector<float>& inputValues,
33 const std::vector<float>& expectedOutputValues,
34 armnn::InstanceNormalizationQueueDescriptor descriptor,
35 float qScale = 0.0f,
36 int32_t qOffset = 0)
37{
Derek Lambertic374ff02019-12-10 21:57:35 +000038 boost::ignore_unused(memoryManager);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010039 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
40 armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010041
42 LayerTestResult<T, 4> result(outputTensorInfo);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010043 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
44 armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010045
46 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
47 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
48
49 armnn::WorkloadInfo info;
50
51
52 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
53 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
54
55 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
56
57 inputHandle->Allocate();
58 outputHandle->Allocate();
59
60 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
61
62 workload->Execute();
63
64 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
65
66 return result;
67}
68
69template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
70LayerTestResult<T, 4> InstanceNormTest(
71 armnn::IWorkloadFactory& workloadFactory,
72 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
73 armnn::DataLayout dataLayout)
74{
75 // BatchSize: 2
76 // Height: 2
77 // Width: 2
78 // Channels: 2
79
80 const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
81
82 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
83 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
84
85 std::vector<float> inputValues
86 {
87 // Batch 0, Height 0, Width 0 x Channel (2)
88 0.f, 1.f,
89 // Batch 0, Height 0, Width 1 x Channel (2)
90 0.f, 2.f,
91
92 // Batch 0, Height 1, Width 0 x Channel (2)
93 0.f, 2.f,
94 // Batch 0, Height 1, Width 1 x Channel (2)
95 0.f, 4.f,
96
97 // Batch 1, Height 0, Width 0 x Channel (2)
98 1.f, -1.f,
99 // Batch 1, Height 0, Width 1 x Channel (2)
100 -1.f, 2.f,
101
102 // Batch 1, Height 1, Width 0 x Channel (2)
103 -1.f, -2.f,
104 // Batch 1, Height 1, Width 1 x Channel (2)
105 1.f, 4.f
106 };
107
108 std::vector<float> expectedOutputValues
109 {
110 // Batch 0, Height 0, Width 0 x Channel (2)
111 0.f, -1.1470304f,
112 // Batch 0, Height 0, Width 1 x Channel (2)
113 0.f, -0.22940612f,
114 // Batch 0, Height 1, Width 0 x Channel (2)
115 0.f, -0.22940612f,
116 // Batch 0, Height 1, Width 1 x Channel (2)
117 0.f, 1.6058424f,
118
119 // Batch 1, Height 0, Width 0 x Channel (2)
120 0.99995005f, -0.7337929f,
121 // Batch 1, Height 0, Width 1 x Channel (2)
122 -0.99995005f, 0.52413774f,
123
124 // Batch 1, Height 1, Width 0 x Channel (2)
125 -0.99995005f, -1.1531031f,
126 // Batch 1, Height 1, Width 1 x Channel (2)
127 0.99995005f, 1.3627582f
128 };
129
130 if (dataLayout == armnn::DataLayout::NCHW)
131 {
132 PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
133 PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
134 }
135
136 armnn::InstanceNormalizationQueueDescriptor descriptor;
137 descriptor.m_Parameters.m_Eps = 0.0001f;
138 descriptor.m_Parameters.m_Beta = 0.0f;
139 descriptor.m_Parameters.m_Gamma = 1.0f;
140 descriptor.m_Parameters.m_DataLayout = dataLayout;
141
142 return InstanceNormTestImpl<ArmnnType>(
143 workloadFactory,
144 memoryManager,
145 inputTensorInfo,
146 outputTensorInfo,
147 inputValues,
148 expectedOutputValues,
149 descriptor);
150}
151
152template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
153LayerTestResult<T, 4> InstanceNormTest2(
154 armnn::IWorkloadFactory& workloadFactory,
155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
156 armnn::DataLayout dataLayout)
157{
158 // BatchSize: 2
159 // Height: 2
160 // Width: 2
161 // Channels: 2
162
163 const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
164
165 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
166 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
167
168 std::vector<float> inputValues
169 {
170 // Batch 0, Height 0, Width 0 x Channel (2)
171 0.f, 1.f,
172 // Batch 0, Height 0, Width 1 x Channel (2)
173 0.f, 2.f,
174
175 // Batch 0, Height 1, Width 0 x Channel (2)
176 0.f, 2.f,
177 // Batch 0, Height 1, Width 1 x Channel (2)
178 0.f, 4.f,
179
180 // Batch 1, Height 0, Width 0 x Channel (2)
181 1.f, -1.f,
182 // Batch 1, Height 0, Width 1 x Channel (2)
183 -1.f, 2.f,
184
185 // Batch 1, Height 1, Width 0 x Channel (2)
186 -1.f, -2.f,
187 // Batch 1, Height 1, Width 1 x Channel (2)
188 1.f, 4.f
189 };
190
191 std::vector<float> expectedOutputValues
192 {
193 // Batch 0, Height 0, Width 0 x Channel (2)
194 10.f, 7.7059393f,
195 // Batch 0, Height 0, Width 1 x Channel (2)
196 10.f, 9.541187f,
197
198 // Batch 0, Height 1, Width 0 x Channel (2)
199 10.f, 9.541187f,
200 // Batch 0, Height 1, Width 1 x Channel (2)
201 10.f, 13.211685f,
202
203 // Batch 1, Height 0, Width 0 x Channel (2)
204 11.9999f, 8.532414f,
205 // Batch 1, Height 0, Width 1 x Channel (2)
206 8.0001f, 11.048275f,
207
208 // Batch 1, Height 1, Width 0 x Channel (2)
209 8.0001f, 7.693794f,
210 // Batch 1, Height 1, Width 1 x Channel (2)
211 11.9999f, 12.725516f
212 };
213
214 if (dataLayout == armnn::DataLayout::NCHW)
215 {
216 PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
217 PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
218 }
219
220 armnn::InstanceNormalizationQueueDescriptor descriptor;
221 descriptor.m_Parameters.m_Eps = 0.0001f;
222 descriptor.m_Parameters.m_Beta = 10.0f;
223 descriptor.m_Parameters.m_Gamma = 2.0f;
224 descriptor.m_Parameters.m_DataLayout = dataLayout;
225
226 return InstanceNormTestImpl<ArmnnType>(
227 workloadFactory,
228 memoryManager,
229 inputTensorInfo,
230 outputTensorInfo,
231 inputValues,
232 expectedOutputValues,
233 descriptor);
234}

} // anonymous namespace

// Runs the basic instance-norm case (beta = 0, gamma = 1) on Float32 data.
LayerTestResult<float, 4> InstanceNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

// Runs the basic instance-norm case (beta = 0, gamma = 1) on Float16 data.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}

// Runs the affine instance-norm case (beta = 10, gamma = 2) on Float32 data.
LayerTestResult<float, 4> InstanceNormFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

// Runs the affine instance-norm case (beta = 10, gamma = 2) on Float16 data.
LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout)
{
    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
}