//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizedLstmEndToEndTestImpl.hpp"

#include "CommonTestUtils.hpp"
#include "EndToEndTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/INetwork.hpp>
#include <armnn/QuantizedLstmParams.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <type_traits>

namespace
{

using MultiArray = const boost::multi_array<uint8_t, 2>&;

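// Builds a network containing a single QuantizedLstm layer with three inputs
// (input, cellStateIn, outputStateIn) and two outputs (cellStateOut, outputStateOut).
// The weights and biases are fixed test data; 'expectedOutput' is only used to
// derive the output size.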
armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
                                              MultiArray expectedOutput)
{
    auto batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
    auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
    auto outputSize = armnn::numeric_cast<unsigned int>(expectedOutput.shape()[1]);

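    // Quantization parameters: scale/offset pairs for the QAsymmU8 input/output
    // tensors, the QAsymmU8 weights, the Signed32 biases and the QSymmS16 cell state.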
    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QAsymmU8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QAsymmU8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);

    armnn::QuantizedLstmInputParams data;

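    // Constant weight and bias data: input-to-gate weights are outputSize x inputSize,
    // recurrent weights are outputSize x outputSize, and each bias holds outputSize elements.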
    const std::vector<uint8_t> inputToInputWeightsVector = {146, 250, 235, 171, 10, 218, 171, 108};
    armnn::ConstTensor inputToInputWeightsTensor(inputWeightsInfo, inputToInputWeightsVector.data());

    const std::vector<uint8_t> inputToForgetWeightsVector = {24, 50, 132, 179, 158, 110, 3, 169};
    armnn::ConstTensor inputToForgetWeightsTensor(inputWeightsInfo, inputToForgetWeightsVector.data());

    const std::vector<uint8_t> inputToCellWeightsTensorVector = {133, 34, 29, 49, 206, 109, 54, 183};
    armnn::ConstTensor inputToCellWeightsTensor(inputWeightsInfo, inputToCellWeightsTensorVector.data());

    const std::vector<uint8_t> inputToOutputWeightsTensorVector = {195, 187, 11, 99, 109, 10, 218, 48};
    armnn::ConstTensor inputToOutputWeightsTensor(inputWeightsInfo, inputToOutputWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToInputWeightsTensorVector =
        {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
    armnn::ConstTensor recurrentToInputWeightsTensor(recurrentWeightsInfo, recurrentToInputWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToForgetWeightsTensorVector =
        {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
    armnn::ConstTensor recurrentToForgetWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToForgetWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToCellWeightsTensorVector =
        {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
    armnn::ConstTensor recurrentToCellWeightsTensor(recurrentWeightsInfo, recurrentToCellWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToOutputWeightsTensorVector =
        {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
    armnn::ConstTensor recurrentToOutputWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToOutputWeightsTensorVector.data());

    const std::vector<int32_t> inputGateBiasTensorVector = {-7876, 13488, -726, 32839};
    armnn::ConstTensor inputGateBiasTensor(biasInfo, inputGateBiasTensorVector.data());

    const std::vector<int32_t> forgetGateBiasTensorVector = {9206, -46884, -11693, -38724};
    armnn::ConstTensor forgetGateBiasTensor(biasInfo, forgetGateBiasTensorVector.data());

    const std::vector<int32_t> cellBiasTensorVector = {39481, 48624, 48976, -21419};
    armnn::ConstTensor cellBiasTensor(biasInfo, cellBiasTensorVector.data());

    const std::vector<int32_t> outputGateBiasTensorVector = {-58999, -17050, -41852, -40538};
    armnn::ConstTensor outputGateBiasTensor(biasInfo, outputGateBiasTensorVector.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0);
    armnn::IConnectableLayer* const cellStateIn = net->AddInputLayer(1);
    armnn::IConnectableLayer* const outputStateIn = net->AddInputLayer(2);
    armnn::IConnectableLayer* const quantizedLstmLayer = net->AddQuantizedLstmLayer(data, "quantizedLstm");
    armnn::IConnectableLayer* const cellStateOut = net->AddOutputLayer(0);
    armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(1);

    armnn::TensorInfo inputTensorInfo({batchSize, inputSize},
                                      armnn::DataType::QAsymmU8,
                                      inputOutputScale,
                                      inputOutputOffset);

    armnn::TensorInfo cellStateInTensorInfo({batchSize, outputSize},
                                            armnn::DataType::QSymmS16,
                                            cellStateScale,
                                            cellStateOffset);

    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize},
                                              armnn::DataType::QAsymmU8,
                                              inputOutputScale,
                                              inputOutputOffset);

    armnn::TensorInfo cellStateOutTensorInfo({batchSize, outputSize},
                                             armnn::DataType::QSymmS16,
                                             cellStateScale,
                                             cellStateOffset);

    armnn::TensorInfo outputTensorInfo({batchSize, outputSize},
                                       armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);

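    // The QuantizedLstm layer is wired as: input slot 0 takes the network input,
    // slot 1 the cell state, slot 2 the output state; output slot 0 produces the new
    // cell state and slot 1 the new output state.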
    // connect up
    // inputs
    Connect(inputLayer, quantizedLstmLayer, inputTensorInfo, 0, 0);
    Connect(cellStateIn, quantizedLstmLayer, cellStateInTensorInfo, 0, 1);
    Connect(outputStateIn, quantizedLstmLayer, outputStateInTensorInfo, 0, 2);

    // outputs
    Connect(quantizedLstmLayer, cellStateOut, cellStateOutTensorInfo, 0, 0);
    Connect(quantizedLstmLayer, outputStateOut, outputTensorInfo, 1, 0);

    return net;
}

// Checks if two values of an arithmetic type are close enough to each other
// with regard to a given tolerance value.
template<typename T>
typename std::enable_if<std::is_arithmetic<T>::value, bool>::type
IsCloseEnough(T value1, T value2, T tolerance)
{
    if (tolerance < 0)
    {
        throw armnn::InvalidArgumentException("Tolerance cannot be < 0");
    }

    T diff = value1 >= value2 ? static_cast<T>(value1 - value2) : static_cast<T>(value2 - value1);
    return diff <= tolerance;
}

} // anonymous namespace

void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
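    // Reference input and state data for batch size 2, input size 2 and output size 4;
    // cellStateOutVector and outputStateOutVector hold the expected results.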
    std::vector<uint8_t> inputVector = {166, 179, 50, 150};
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
    boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);

    std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
    armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
    boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);

    std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
    armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
    boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);

    std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
    armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
    boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);

    std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
    boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);

    // Builds up the structure of the network
    armnn::INetworkPtr net = CreateQuantizedLstmNetwork(input, outputStateOut);

    BOOST_TEST_CHECKPOINT("create a network");

    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

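    // Binding ids 0-2 (inputs) and 0-1 (outputs) below match the ids passed to
    // AddInputLayer/AddOutputLayer in CreateQuantizedLstmNetwork.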
    InputTensors inputTensors;
    inputTensors.reserve(3);

    // input
    inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
    inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), cellStateInVector.data())});
    inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), outputStateInVector.data())});

    OutputTensors outputTensors;
    outputTensors.reserve(2);

    // output
    std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
    std::vector<uint8_t> outputStateOutResult(outputStateOutVector.size());
    outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), cellStateOutResult.data())});
    outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), outputStateOutResult.data())});

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
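    // The tolerances allow for small numerical differences in the backend results:
    // +/-2 for the QSymmS16 cell state and +/-1 for the QAsymmU8 output state.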
    constexpr int16_t toleranceInt16 = 2;
    for (unsigned int i = 0u; i < cellStateOutResult.size(); ++i)
    {
        BOOST_CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
    }

    constexpr uint8_t toleranceUint8 = 1;
    for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
    {
        BOOST_TEST(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
    }
}