//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/WorkloadUtils.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>

#include <fmt/format.h>
#include <numeric>

namespace armnn
{

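// Copies the data referenced by 'tensor' into 'permuteBuffer', permuting it with 'permutationVector' when the
// vector is non-empty, and returns a ConstTensor that wraps the (possibly permuted) copy in 'permuteBuffer'.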
armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
                                 const PermutationVector& permutationVector, void* permuteBuffer)
{
    if (tensor == nullptr)
    {
        throw armnn::InvalidArgumentException("WorkloadUtils: PermuteTensor: Null input tensor pointer");
    }
    if (permuteBuffer == nullptr)
    {
        throw armnn::InvalidArgumentException("WorkloadUtils: PermuteTensor: Null permute buffer pointer");
    }

    TensorInfo tensorInfo = tensor->GetTensorInfo();

    if (permutationVector.GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector);
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector,
                            tensor->GetConstTensor<void>(), permuteBuffer,
                            GetDataTypeSize(tensorInfo.GetDataType()));
    }
    else
    {
        ::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes());
    }
    tensorInfo.SetConstant(true);
    return ConstTensor(tensorInfo, permuteBuffer);
}

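// Reshapes depthwise convolution weight info in-place into the 4D shape required by the compute library (ACL)
// for the given data layout.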
void ReshapeWeightsForAcl(TensorInfo& weightInfo, DataLayout dataLayout)
{
    // Reshape the weights in-place
    const TensorShape& weightShape = weightInfo.GetShape();
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            // The data layout is NHWC, reshape from [ H, W, I, M ] to [ 1, H, W, I * M ]
            weightInfo.SetShape({ 1,
                                  weightShape[0],
                                  weightShape[1],
                                  weightShape[2] * weightShape[3] });
            weightInfo.SetShape({ 1,
                                  weightShape[0] * weightShape[1],
                                  weightShape[2],
                                  weightShape[3] });
            break;
        case DataLayout::NCHW:
        default:
            // The data layout is NCHW, reshape from [ M, I, H, W ] to [ 1, I * M, H, W ]
            weightInfo.SetShape({ 1, weightShape[0] * weightShape[1], weightShape[2], weightShape[3] });
            break;
    }
}

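// Reorders the depthwise weight channels held in 'permuteBuffer' from ArmNN's multiplier-major order
// (flat channel index m * I + i in [ M, I, H, W ]) to the input-channel-major order ACL expects
// (flat channel index i * M + m), copying one H * W channel plane at a time. In this file it is only
// invoked for NCHW weights with multiplier > 1 and more than one input channel.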
template <typename DataType>
ConstTensor ReorderWeightChannelsForAcl(const ConstTensor& weightHandle, DataLayout dataLayout, void* permuteBuffer)
{
    DataType* weight = static_cast<DataType*>(permuteBuffer);
    const TensorShape& weightShape = weightHandle.GetShape();
    unsigned int multiplier;
    unsigned int height;
    unsigned int width;
    unsigned int inputChannels;
    switch (dataLayout)
    {
        case DataLayout::NHWC: // It actually is [ H, W, I, M ]
            height        = weightShape[0];
            width         = weightShape[1];
            inputChannels = weightShape[2];
            multiplier    = weightShape[3];
            break;
        case DataLayout::NCHW: // It actually is [ M, I, H, W ]
        default:
            height        = weightShape[2];
            width         = weightShape[3];
            inputChannels = weightShape[1];
            multiplier    = weightShape[0];
            break;
    }

    std::vector<DataType> weightAclOrder(height * width * inputChannels * multiplier);
    unsigned int destinationWeightsChannel;
    unsigned int totalChannels = inputChannels * multiplier;
    unsigned int channelSize   = height * width;
    unsigned int inputChannel  = 0;

    for (unsigned int originWeightsChannel = 0; originWeightsChannel < totalChannels; originWeightsChannel++)
    {
        inputChannel = originWeightsChannel % inputChannels;
        destinationWeightsChannel = (originWeightsChannel - inputChannel) / inputChannels + multiplier * inputChannel;

        for (unsigned int i = 0; i < channelSize; i++)
        {
            weightAclOrder[i + destinationWeightsChannel * channelSize] =
                weight[i + originWeightsChannel * channelSize];
        }
    }

    ::memcpy(permuteBuffer, weightAclOrder.data(), weightHandle.GetInfo().GetNumBytes());
    return ConstTensor(weightHandle.GetInfo(), permuteBuffer);
}

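// Produces the TensorInfo a depthwise weight tensor will have after conversion from ArmNN's [ M, I, H, W ]
// layout to the ACL layout for the given data layout (the info-only counterpart of
// ConvertWeightTensorFromArmnnToAcl below).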
TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, DataLayout dataLayout)
{
    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library

    // 1. Permute the weights if necessary
    // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done
    // starting from the current shape of [ M, I, H, W ]
    TensorInfo weightPermutedInfo(weightInfo);
    if (dataLayout == DataLayout::NHWC)
    {
        // The data layout is NHWC, so permute the weights from [ M, I, H, W ] to [ H, W, I, M ]
        PermutationVector permutationVector{ 3, 2, 0, 1 };
        weightPermutedInfo = armnnUtils::Permuted(weightInfo, permutationVector);
    }

    // 2. Reshape the weights
    ReshapeWeightsForAcl(weightPermutedInfo, dataLayout);

    // 3. Return the permuted weight info
    return weightPermutedInfo;
}

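// Converts a depthwise weight tensor stored as [ 1, H, W, I * M ] into the layout ACL expects for the given
// data layout (permuted to [ 1, I * M, H, W ] for NCHW, unchanged for NHWC), writing the result into
// 'permuteBuffer'. Also returns the depth multiplier M derived from the weight and input shapes.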
std::tuple<ConstTensor, unsigned int> Convert1HWOTensorToAcl(const ConstTensorHandle* weightTensor,
                                                             const TensorInfo& inputInfo,
                                                             const DataLayout dataLayout,
                                                             void* permuteBuffer)
{
    TensorInfo weightsInfo = weightTensor->GetTensorInfo();
    unsigned int depthMultiplier = 1;
    PermutationVector permutationVector{};
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        // No permutation required. Data layouts are the same.

        depthMultiplier = weightsInfo.GetShape()[3] / inputInfo.GetShape()[3];
    }
    else if (dataLayout == armnn::DataLayout::NCHW)
    {
        // [ 1, H, W, I * M ] --> [ 1, I * M, H, W ]
        depthMultiplier   = weightsInfo.GetShape()[3] / inputInfo.GetShape()[1];
        permutationVector = { 0, 2, 3, 1 };
    }
    else
    {
        throw InvalidArgumentException(fmt::format("Unknown data layout for tensor conversion: {}",
                                                   GetDataLayoutName(dataLayout)));
    }

    ConstTensor weightsPermuted = PermuteTensor(weightTensor, permutationVector, permuteBuffer);

    return std::make_tuple(weightsPermuted, depthMultiplier);
}

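// TensorInfo-only counterpart of Convert1HWOTensorToAcl: computes the permuted weight TensorInfo and the
// depth multiplier without touching any tensor data.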
std::tuple<TensorInfo, unsigned int> Convert1HWOTensorInfoToAcl(const TensorInfo& weightInfo,
                                                                const TensorInfo& inputInfo,
                                                                const DataLayout dataLayout)
{
    unsigned int aclDepthMultiplier = 1;
    TensorInfo weightsPermuted;
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        // No permutation required. Input and weights data layouts are the same.
        aclDepthMultiplier = weightInfo.GetShape()[3] / inputInfo.GetShape()[3];
        weightsPermuted    = weightInfo;
    }
    else if (dataLayout == armnn::DataLayout::NCHW)
    {
        // Weights permutation required. Weights [N,H,W,C] and input [N,C,H,W] data layouts are different.
        // [ 1, H, W, I * M ] --> [ 1, I * M, H, W ]
        aclDepthMultiplier = weightInfo.GetShape()[3] / inputInfo.GetShape()[1];
        PermutationVector permutationVector{ 0, 2, 3, 1 };
        weightsPermuted = armnnUtils::Permuted(weightInfo, permutationVector);
    }
    else
    {
        throw InvalidArgumentException(fmt::format("Unknown data layout for tensor info conversion: {}",
                                                   GetDataLayoutName(dataLayout)));
    }

    return std::make_tuple(weightsPermuted, aclDepthMultiplier);
}

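// Converts a depthwise weight tensor from the [ 1, H, W, I * M ] layout to [ M, I, H, W ], writing the
// permuted data into 'permuteBuffer' and returning the depth multiplier. Not supported for per-axis
// quantized weights.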
std::tuple<ConstTensor, unsigned int> Convert1HWOtoMIHW(const ConstTensorHandle* weightTensor,
                                                        const TensorInfo& inputInfo,
                                                        const DataLayout& dataLayout,
                                                        void* permuteBuffer)
{
    TensorInfo weightsInfo = weightTensor->GetTensorInfo();

    if (weightsInfo.HasPerAxisQuantization())
    {
        throw InvalidArgumentException("Can't convert tensor from [1,H,W,Cout] to [M,Cin,H,W] when per channel "
                                       "quantization is applied.");
    }

    // Reshape weights [ 1, H, W, I * M ] --> [ H, W, I, M ]
    auto weightsShape = weightsInfo.GetShape();
    auto channelIndex = armnnUtils::DataLayoutIndexed(dataLayout).GetChannelsIndex();
    unsigned int depthMultiplier = weightsShape[3] / inputInfo.GetShape()[channelIndex];
    weightsInfo.SetShape({ weightsShape[1],
                           weightsShape[2],
                           inputInfo.GetShape()[channelIndex],
                           depthMultiplier });

    // Permute [ H, W, I, M ] --> [ M, I, H, W ]
    PermutationVector permutationVector = { 2, 3, 1, 0 };
    ConstTensor weightsPermuted = PermuteTensor(weightTensor, permutationVector, permuteBuffer);

    return std::make_tuple(weightsPermuted, depthMultiplier);
}

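// Converts depthwise weight data from ArmNN's [ M, I, H, W ] layout to the layout required by the compute
// library ([ 1, H, W, I * M ] for NHWC, [ 1, I * M, H, W ] for NCHW), permuting, reordering channels and
// reshaping via 'permuteBuffer' as needed.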
armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* weightTensor,
                                                     DataLayout dataLayout,
                                                     void* permuteBuffer)
{
    if (weightTensor == nullptr)
    {
        throw armnn::InvalidArgumentException("WorkloadUtils: ConvertWeightTensorFromArmnnToAcl: "
                                              "Null input tensor pointer");
    }
    if (permuteBuffer == nullptr)
    {
        throw armnn::InvalidArgumentException("WorkloadUtils: ConvertWeightTensorFromArmnnToAcl: "
                                              "Null permute buffer pointer");
    }

    auto multiplier    = weightTensor->GetTensorInfo().GetShape()[0];
    auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library

    // 1. Permute the weights if necessary
    // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done
    // starting from the current shape of [ M, I, H, W ]
    // If no permutation is necessary, leave the permutation vector empty
    PermutationVector permutationVector{};
    if (dataLayout == DataLayout::NHWC)
    {
        // The data layout is NHWC, so permute the weights from [ M, I, H, W ] to [ H, W, I, M ]
        permutationVector = { 3, 2, 0, 1 };
    }
    ConstTensor weightPermuted = PermuteTensor(weightTensor, permutationVector, permuteBuffer);

    // Shuffle the weights data to obtain the channel order used by ACL
    if (multiplier > 1 && inputChannels > 1 && dataLayout == DataLayout::NCHW)
    {
        switch (weightPermuted.GetDataType())
        {
            case DataType::Float32:
                weightPermuted = ReorderWeightChannelsForAcl<float>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::Float16:
                weightPermuted =
                    ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::QAsymmS8:
            case DataType::QAsymmU8:
                weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::QSymmS8:
                weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer);
                break;
            default:
                break;
        }
    }

    // 2. Reshape the weights
    ReshapeWeightsForAcl(weightPermuted.GetInfo(), dataLayout);

    // 3. Return both the tensor and the allocated storage to ensure that the data stays alive
    return weightPermuted;
}

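// Reverses the bit order of a dimension mask over 'numDim' bits, so that bit i becomes bit (numDim - 1 - i).
// ACL indexes tensor dimensions in the reverse order to ArmNN, so masks (e.g. strided-slice begin/end masks)
// have to be bit-reversed. For example, mask 0b011 with numDim 3 becomes 0b110.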
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
{
    int32_t reversedMask = 0;
    for (unsigned int i = 0; i < armnn::numeric_cast<unsigned int>(numDim); ++i)
    {
        // Check if bit set in mask for each dimension
        int32_t bit = (mask & 1 << i) != 0;
        // Increment the new mask with the bits reversed
        reversedMask += (bit << std::max(numDim - (armnn::numeric_cast<int>(i) + 1), 0));
    }

    return reversedMask;
}

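// Computes the key sizes used to decompose a GatherNd operation, given the params (inputInfo0) and indices
// (inputInfo1) tensor infos: N (batches), ND (dimensions sliced from params), W (indices per batch),
// K (range of each index) and C (channels per index).
// For example, params of shape [ 3, 4, 5 ] and indices of shape [ 2, 2 ] give ND = 2, W = 2, K = 12, C = 5.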
std::map<std::string, unsigned int> CalculateGatherNdKeyIndices(TensorInfo inputInfo0, TensorInfo inputInfo1)
{
    std::vector<unsigned int> paramsShape;
    for (unsigned int i = 0; i < inputInfo0.GetNumDimensions(); ++i)
    {
        paramsShape.push_back(inputInfo0.GetShape()[i]);
    }

    std::vector<unsigned int> indicesShape;
    for (unsigned int i = 0; i < inputInfo1.GetNumDimensions(); ++i)
    {
        indicesShape.push_back(inputInfo1.GetShape()[i]);
    }

    std::map<std::string, unsigned int> keyIndices;

    // N: number of batches
    keyIndices["N"] = 1;

    // ND: number of dimensions that are sliced from params
    keyIndices["ND"] = indicesShape.back();

    // W: number of indices in each batch (all but the last dimension)
    keyIndices["W"] =
        static_cast<unsigned int>(std::accumulate(std::begin(indicesShape),
                                                  std::end(indicesShape) - 1,
                                                  1,
                                                  std::multiplies<>()));
    // K: range of each index
    keyIndices["K"] =
        static_cast<unsigned int>(std::accumulate(std::begin(paramsShape),
                                                  std::begin(paramsShape) + static_cast<int>(keyIndices["ND"]),
                                                  1,
                                                  std::multiplies<>()));
    // C: number of channels for each index
    keyIndices["C"] =
        static_cast<unsigned int>(std::accumulate(std::begin(paramsShape) + static_cast<int>(keyIndices["ND"]),
                                                  std::end(paramsShape),
                                                  1,
                                                  std::multiplies<>()));

    return keyIndices;
}

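// Returns the permutation vector that swaps the last two dimensions of a tensor of the given rank (2, 3 or 4),
// leaving any leading dimensions in place. Throws for unsupported ranks.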
armnn::PermutationVector GeneratePermutationVectorOnLastTwoDimensions(unsigned int rank)
{
    armnn::PermutationVector permutationVector{};
    switch (rank)
    {
        case 2:
            permutationVector = {1U, 0U};
            break;
        case 3:
            permutationVector = {0U, 2U, 1U};
            break;
        case 4:
            permutationVector = {0U, 1U, 3U, 2U};
            break;
        default:
            throw Exception("Invalid number of dimensions.");
    }
    return permutationVector;
}

} // namespace armnn