//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "WorkloadTestUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>

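// Builds and runs a BatchNormalization workload with the given factory and data
// layout, returning the actual output together with the caller-supplied expected
// output so the test harness can compare them.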
template<typename T>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());

    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
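    // Batch normalization computes, per channel c:
    //   output = gamma[c] * (input - mean[c]) / sqrt(variance[c] + epsilon) + beta[c]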
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
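    // Epsilon is zero here so the normalization arithmetic stays exact and the
    // expected outputs can be computed by hand.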
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

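// NHWC-specific variant: feeds a fixed [1, 3, 2, 2] input through
// BatchNormalization and checks it against hand-computed expected values.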
103template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000104LayerTestResult<T,4> BatchNormTestNhwcImpl(
105 armnn::IWorkloadFactory& workloadFactory,
106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
107 float qScale,
108 int32_t qOffset)
Nikhil Rajd1340932018-10-18 14:27:50 +0100109{
110 const unsigned int width = 2;
111 const unsigned int height = 3;
112 const unsigned int channels = 2;
113 const unsigned int num = 1;
114
115 armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
116 armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
117 armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
118
119 // Set quantization parameters if the requested type is a quantized type.
120 if(armnn::IsQuantizedType<T>())
121 {
122 inputTensorInfo.SetQuantizationScale(qScale);
123 inputTensorInfo.SetQuantizationOffset(qOffset);
124 outputTensorInfo.SetQuantizationScale(qScale);
125 outputTensorInfo.SetQuantizationOffset(qOffset);
126 tensorInfo.SetQuantizationScale(qScale);
127 tensorInfo.SetQuantizationOffset(qOffset);
128 }
129
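    // The input is NHWC with shape [1, 3, 2, 2]: each row of four values below is
    // one height line, i.e. two width positions with two channel values each.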
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 1.f, 4.f, 1.f,
            4.f, 4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        }));
    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to
    // avoid division by zero), then multiply by gamma and add beta.
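    // Worked example, channel 1 of the first element (epsilon is zero here, so the
    // values are exact): (1 - (-2)) / sqrt(9) * 1 + 2 = 3.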
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
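
// Illustrative usage (a sketch, not part of this header's API): a backend's layer
// tests would typically call these helpers with that backend's workload factory.
// RefWorkloadFactory, CompareTensors and BOOST_TEST are assumed to be available
// from the reference backend and the test framework.
//
//     armnn::RefWorkloadFactory workloadFactory;
//     LayerTestResult<float, 4> result =
//         BatchNormTestNhwcImpl<float>(workloadFactory, nullptr, 0.0f, 0);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));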