//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

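// Builds a FullyConnected workload from the supplied tensor infos and data,
// runs it through the given workload factory, and returns the actual output.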
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

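// Tests the fully connected layer with a 1x1x2x3 quantized input and two output
// channels, using fixed quantization parameters and transposed weights.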
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

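    // Following the usual quantization convention, the bias scale is the product
    // of the input and weight scales, with a zero offset.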
    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
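
    // With m_TransposeWeightMatrix set, the weights are laid out as
    // [outputChannels, inputChannels] rather than [inputChannels, outputChannels].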
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        })
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        })
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

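    // Expected output: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.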
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            965432.0f,
        })
    );

    return result;
}

//
// Explicit template instantiations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
FullyConnectedTest<armnn::DataType::QuantisedAsymm8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
FullyConnectedTest<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

//
// Implementation functions
//

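// Tests the fully connected layer with two batches of five-element inputs and
// three output channels, with optional bias and optional weight transposition.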
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

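    // Each expected output is the dot product of an input batch with a weight
    // column, plus the corresponding bias.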
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
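
//
// Usage sketch (an assumption, not part of this file): backend unit test suites
// typically register these functions through the ARMNN_AUTO_TEST_CASE helper,
// along the lines of:
//
//     ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
//     ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
//
// The exact macro name and arguments depend on the backend test harness in use.
//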