//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <ResolveType.hpp>
#include "WorkloadTestUtils.hpp"
#include <backendsCommon/IBackendInternal.hpp>

LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
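    // When transposeWeights is set, the weights tensor is laid out as
    // [outputChannels, inputChannels] instead of [inputChannels, outputChannels].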
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }
    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

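    // Two batches of five values each; the spatial dimensions are 1x1, so each
    // input is effectively a 5-element vector.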
    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

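    // Each expected output element is the dot product of an input vector with
    // one column of the weights, plus the corresponding bias; the sums below
    // spell this out term by term.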
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
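// A backend test suite might instantiate it along these lines (illustrative
// only; the exact fixture or registration macro depends on the backend under test):
//
//     FullyConnectedLargeTestCommon<armnn::DataType::Float32>(
//         workloadFactory, memoryManager, /*transposeWeights=*/ false);
//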
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

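    // QuantizedVector quantizes the float literals with qScale/qOffset when T
    // is a quantized type, and passes them through unchanged for float types.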
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        })
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        })
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

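    // Expected result: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.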
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            965432.0f,
        })
    );

    return result;
}