//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizedLstmEndToEndTestImpl.hpp"

#include <CommonTestUtils.hpp>
#include "EndToEndTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/INetwork.hpp>
#include <armnn/QuantizedLstmParams.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

#include <doctest/doctest.h>

#include <type_traits>

namespace
{

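// Builds a network containing a single QuantizedLstm layer with hard-coded constant weights and
// biases. The network has three inputs (input, cell state in, output state in) and two outputs
// (cell state out, output state out).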
armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape,
                                              armnn::TensorShape& outputExpectedShape)
{
    auto batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
    auto inputSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
    auto outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);

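    // Quantization parameters for the tensors below: a quantized value q represents the real
    // value scale * (q - offset).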
    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QAsymmU8,
                                       weightsScale,
                                       weightsOffset,
                                       true);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QAsymmU8,
                                           weightsScale,
                                           weightsOffset,
                                           true);

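    // The bias scale is the product of the input/output scale and the weights scale
    // (0.0078125f * 0.00408021f == 3.1876640625e-05f), matching the usual
    // input_scale * weights_scale convention for quantized biases.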
    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true);

    armnn::QuantizedLstmInputParams data;

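    // Hard-coded quantized weight and bias values. The element counts assume inputSize == 2 and
    // outputSize == 4 (input weights {4, 2}, recurrent weights {4, 4}, biases {4}), matching the
    // tensor shapes used by QuantizedLstmEndToEnd below.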
    const std::vector<uint8_t> inputToInputWeightsVector = {146, 250, 235, 171, 10, 218, 171, 108};
    armnn::ConstTensor inputToInputWeightsTensor(inputWeightsInfo, inputToInputWeightsVector.data());

    const std::vector<uint8_t> inputToForgetWeightsVector = {24, 50, 132, 179, 158, 110, 3, 169};
    armnn::ConstTensor inputToForgetWeightsTensor(inputWeightsInfo, inputToForgetWeightsVector.data());

    const std::vector<uint8_t> inputToCellWeightsTensorVector = {133, 34, 29, 49, 206, 109, 54, 183};
    armnn::ConstTensor inputToCellWeightsTensor(inputWeightsInfo, inputToCellWeightsTensorVector.data());

    const std::vector<uint8_t> inputToOutputWeightsTensorVector = {195, 187, 11, 99, 109, 10, 218, 48};
    armnn::ConstTensor inputToOutputWeightsTensor(inputWeightsInfo, inputToOutputWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToInputWeightsTensorVector =
        {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
    armnn::ConstTensor recurrentToInputWeightsTensor(recurrentWeightsInfo, recurrentToInputWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToForgetWeightsTensorVector =
        {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
    armnn::ConstTensor recurrentToForgetWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToForgetWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToCellWeightsTensorVector =
        {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
    armnn::ConstTensor recurrentToCellWeightsTensor(recurrentWeightsInfo, recurrentToCellWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToOutputWeightsTensorVector =
        {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
    armnn::ConstTensor recurrentToOutputWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToOutputWeightsTensorVector.data());

    const std::vector<int32_t> inputGateBiasTensorVector = {-7876, 13488, -726, 32839};
    armnn::ConstTensor inputGateBiasTensor(biasInfo, inputGateBiasTensorVector.data());

    const std::vector<int32_t> forgetGateBiasTensorVector = {9206, -46884, -11693, -38724};
    armnn::ConstTensor forgetGateBiasTensor(biasInfo, forgetGateBiasTensorVector.data());

    const std::vector<int32_t> cellBiasTensorVector = {39481, 48624, 48976, -21419};
    armnn::ConstTensor cellBiasTensor(biasInfo, cellBiasTensorVector.data());

    const std::vector<int32_t> outputGateBiasTensorVector = {-58999, -17050, -41852, -40538};
    armnn::ConstTensor outputGateBiasTensor(biasInfo, outputGateBiasTensorVector.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

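    // Build the graph: three input layers (input, cell state in, output state in) feed a single
    // QuantizedLstm layer, whose two outputs (cell state out, output state out) are exposed as
    // network outputs.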
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0);
    armnn::IConnectableLayer* const cellStateIn = net->AddInputLayer(1);
    armnn::IConnectableLayer* const outputStateIn = net->AddInputLayer(2);
    armnn::IConnectableLayer* const quantizedLstmLayer = net->AddQuantizedLstmLayer(data, "quantizedLstm");
    armnn::IConnectableLayer* const cellStateOut = net->AddOutputLayer(0);
    armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(1);

    armnn::TensorInfo inputTensorInfo({batchSize, inputSize},
                                      armnn::DataType::QAsymmU8,
                                      inputOutputScale,
                                      inputOutputOffset);

    armnn::TensorInfo cellStateInTensorInfo({batchSize, outputSize},
                                            armnn::DataType::QSymmS16,
                                            cellStateScale,
                                            cellStateOffset);

    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize},
                                              armnn::DataType::QAsymmU8,
                                              inputOutputScale,
                                              inputOutputOffset);

    armnn::TensorInfo cellStateOutTensorInfo({batchSize, outputSize},
                                             armnn::DataType::QSymmS16,
                                             cellStateScale,
                                             cellStateOffset);

    armnn::TensorInfo outputTensorInfo({batchSize, outputSize},
                                       armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);

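    // QuantizedLstm slot layout: input slots 0 = input, 1 = cell state in, 2 = output state in;
    // output slots 0 = cell state out, 1 = output state out.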
    // connect up
    // inputs
    Connect(inputLayer, quantizedLstmLayer, inputTensorInfo, 0, 0);
    Connect(cellStateIn, quantizedLstmLayer, cellStateInTensorInfo, 0, 1);
    Connect(outputStateIn, quantizedLstmLayer, outputStateInTensorInfo, 0, 2);

    // outputs
    Connect(quantizedLstmLayer, cellStateOut, cellStateOutTensorInfo, 0, 0);
    Connect(quantizedLstmLayer, outputStateOut, outputTensorInfo, 1, 0);

    return net;
}

// Checks if two values of an arithmetic type are close enough to each other
// with regard to a given tolerance value.
template<typename T>
typename std::enable_if<std::is_arithmetic<T>::value, bool>::type
IsCloseEnough(T value1, T value2, T tolerance)
{
    if (tolerance < 0)
    {
        throw armnn::InvalidArgumentException("Tolerance cannot be < 0");
    }

    T diff = value1 >= value2 ? static_cast<T>(value1 - value2) : static_cast<T>(value2 - value1);
    return diff <= tolerance;
}

} // anonymous namespace

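// End-to-end test: builds the QuantizedLstm network above, runs a single inference on the given
// backends, and checks the cell state and output state results against pre-computed reference
// values within a small per-element tolerance.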
void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
    std::vector<uint8_t> inputVector = {166, 179, 50, 150};
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);

    std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
    armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);

    std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
    armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);

    std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
    armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);

    std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);

    // Builds up the structure of the network
    armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape());

    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    InputTensors inputTensors;
    inputTensors.reserve(3);

    // input
    TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
    TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
    TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
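    // Mark the input TensorInfos as constant so the input data can be wrapped in ConstTensor
    // handles below.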
    inputTensorInfo0.SetConstant(true);
    inputTensorInfo1.SetConstant(true);
    inputTensorInfo2.SetConstant(true);

    inputTensors.push_back({0, ConstTensor(inputTensorInfo0, inputVector.data())});
    inputTensors.push_back({1, ConstTensor(inputTensorInfo1, cellStateInVector.data())});
    inputTensors.push_back({2, ConstTensor(inputTensorInfo2, outputStateInVector.data())});

    OutputTensors outputTensors;
    outputTensors.reserve(2);

    // output
    std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
    std::vector<uint8_t> outputStateOutResult(outputStateOutVector.size());
    outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), cellStateOutResult.data())});
    outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), outputStateOutResult.data())});

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results against the expected values, allowing a small per-element tolerance.
    constexpr int16_t toleranceInt16 = 2;
    for (unsigned int i = 0u; i < cellStateOutResult.size(); ++i)
    {
        CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
    }

    constexpr uint8_t toleranceUint8 = 1;
    for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
    {
        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
    }
}
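
// A minimal usage sketch: backend-specific end-to-end test suites are expected to call this helper
// from a doctest test case, along the lines of (the test name and backend choice are illustrative):
//
//     TEST_CASE("QuantizedLstmEndToEndTest")
//     {
//         QuantizedLstmEndToEnd({armnn::Compute::CpuAcc});
//     }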