//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"
#include "Utils.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_0::OperationType::DEPTH_TO_SPACE:
            return ConvertDepthToSpace(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_0::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

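// The simple element-wise, activation and layout operations below are thin wrappers: they log the
// call and delegate to the templated conversion helpers (::ConvertAdd<HalPolicy>,
// ConvertPooling2d<HalPolicy>, and so on) shared across the HAL policy versions. Only LSTM,
// SOFTMAX, SPACE_TO_DEPTH and RESIZE_BILINEAR are converted inline in this file.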
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAdd()");
    return ::ConvertAdd<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");
    return ::ConvertConcatenation<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
    return ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDepthToSpace()");
    return ::ConvertDepthToSpace<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
    return ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");
    return ::ConvertDequantize<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFloor()");
    return ::ConvertFloor<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");
    return ::ConvertFullyConnected<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");
    return ::ConvertLocalResponseNormalization<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");
    return ::ConvertLogistic<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

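    // Note: the HAL 1.0 LSTM operation carries 23 inputs (indices 0-22) and 4 outputs (indices 0-3);
    // the presence or absence of the optional weight/bias operands below determines whether the CIFG,
    // peephole and projection variants of the layer are used.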
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 3]
    //     with CIFG, or [batch_size, num_units * 4] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

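    // Determine which optional features the operand set implies: CIFG ("coupled input and forget
    // gate", used when no separate input gate parameters are supplied), peephole connections
    // (cell-to-gate weights present), and an output projection layer (projection weights present).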
    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

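    // Query the configured backends for support of this LSTM configuration before adding the layer;
    // if no backend supports it, the conversion is rejected so the NNAPI runtime can fall back to
    // another driver or its own CPU implementation.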
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}

bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");
    return ::ConvertL2Normalization<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMul()");
    return ::ConvertMul<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

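    // Operand 1 holds the softmax beta: the positive scaling factor applied to the logits before
    // exponentiation (beta == 1.0f gives the standard softmax).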
    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

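    // Operand 1 holds the block size; SPACE_TO_DEPTH moves each block_size x block_size spatial
    // block into the channel dimension, so the input width and height must be divisible by it.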
    armnn::SpaceToDepthDescriptor desc;

    GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReshape()");
    return ::ConvertReshape<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

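    // RESIZE_BILINEAR maps onto ArmNN's generic Resize layer with the bilinear method; HAL 1.0
    // tensors are NHWC, so that layout is set explicitly on the descriptor.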
    armnn::ResizeDescriptor desc;
    desc.m_Method = armnn::ResizeMethod::Bilinear;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

} // namespace hal_1_0
} // namespace armnn_driver