//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <DataLayoutIndexed.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

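// Batch normalization applies, independently for each channel c:
//
//   output = gamma[c] * (input - mean[c]) / sqrt(variance[c] + epsilon) + beta[c]
//
// The helpers below build a BatchNormalizationQueueDescriptor around fixed per-channel
// parameter tensors and compare the workload output against values computed with this formula.
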
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
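    // With the eps of 0 used below, these parameters make channel 0 an identity mapping
    // (gamma / sqrt(variance) == 1 and beta == mean), while channel 1 maps x to (x + 2) / 3 + 2;
    // the expectedOutputValues supplied by the callers are computed accordingly.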

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 2;
    const unsigned int num = 1;

    armnn::TensorInfo inputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo tensorInfo({ channels }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  QuantizedVector<T>(
                                  {
                                      1.f, 1.f, 4.f, 1.f,
                                      4.f, 4.f, 2.f, 1.f,
                                      1.f, -2.f, 6.f, 4.f
                                  },
                                  qScale, qOffset));
    // These values are per-channel of the input.
    auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to avoid division by zero),
    // multiply by gamma and add beta.
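    // With the parameters above and eps == 0, channel 0 reduces to the identity
    // (2 * (x - 3) / sqrt(4) + 3 == x) and channel 1 becomes (x + 2) / sqrt(9) + 2,
    // which is how the expected values below are obtained.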
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                          QuantizedVector<T>(
                                          {
                                              1.f, 3.f, 4.f, 3.f,
                                              4.f, 4.f, 2.f, 3.f,
                                              1.f, 2.f, 6.f, 4.f
                                          },
                                          qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

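    // With scale 1/20 and offset 50, QuantizedVector maps a float v to roughly round(v * 20) + 50,
    // e.g. 1.f -> 70, -2.f -> 10 and 6.f -> 170, so all of the test data fits into uint8.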
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

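    // The Int16 variant reuses the same float data and the same scale (1/20) and offset (50)
    // as the Uint8 tests; only the quantised data type passed to BatchNormTestImpl differs.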
    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

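// Runs the same BatchNormalization descriptor through both the factory under test and a
// reference factory on randomised data, storing the reference result as outputExpected so
// the caller can compare the two.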
LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

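    // Per-channel parameters are randomised; the 0.0f passed for the variance tensor is
    // assumed here to be MakeRandomTensor's lower bound, keeping the variance non-negative
    // so that sqrt(variance + eps) stays well defined.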
    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}