//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/WorkloadUtils.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/NumericCast.hpp>

namespace armnn
{

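// Copies, and optionally permutes, the data of a constant tensor handle into a caller-provided buffer and
// returns a ConstTensor that points at that buffer. The buffer must be at least
// tensor->GetTensorInfo().GetNumBytes() in size; the returned tensor does not take ownership of it.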
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
                                 const PermutationVector& permutationVector, void* permuteBuffer)
{
    ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
    ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");

    TensorInfo tensorInfo = tensor->GetTensorInfo();

    if (permutationVector.GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector);
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector,
                            tensor->GetConstTensor<void>(), permuteBuffer,
                            GetDataTypeSize(tensorInfo.GetDataType()));
    }
    else
    {
        ::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes());
    }

    return ConstTensor(tensorInfo, permuteBuffer);
}

void ReshapeWeightsForAcl(TensorInfo& weightInfo, DataLayout dataLayout)
{
    // Reshape the weights in-place
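    // For example, a depthwise weight tensor of [ M = 2, I = 3, H = 5, W = 5 ] is reshaped to
    // [ 1, 6, 5, 5 ] for NCHW, or (once already permuted to [ H, W, I, M ]) to [ 1, 5, 5, 6 ] for NHWC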
    const TensorShape& weightShape = weightInfo.GetShape();
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            // The data layout is NHWC, reshape from [ H, W, I, M ] to [ 1, H, W, I * M ]
            weightInfo.SetShape({ 1,
                                  weightShape[0],
                                  weightShape[1],
                                  weightShape[2] * weightShape[3] });
            break;
        case DataLayout::NCHW:
        default:
            // The data layout is NCHW, reshape from [ M, I, H, W ] to [ 1, I * M, H, W ]
            weightInfo.SetShape({ 1, weightShape[0] * weightShape[1], weightShape[2], weightShape[3] });
            break;
    }
}

template <typename DataType>
ConstTensor ReorderWeightChannelsForAcl(const ConstTensor& weightHandle, DataLayout dataLayout, void* permuteBuffer)
{
    DataType* weight = static_cast<DataType*>(permuteBuffer);
    const TensorShape& weightShape = weightHandle.GetShape();
    unsigned int multiplier;
    unsigned int height;
    unsigned int width;
    unsigned int inputChannels;
    switch (dataLayout)
    {
        case DataLayout::NHWC: // It actually is [ H, W, I, M ]
            height = weightShape[0];
            width = weightShape[1];
            inputChannels = weightShape[2];
            multiplier = weightShape[3];
            break;
        case DataLayout::NCHW: // It actually is [ M, I, H, W ]
        default:
            height = weightShape[2];
            width = weightShape[3];
            inputChannels = weightShape[1];
            multiplier = weightShape[0];
            break;
    }

    std::vector<DataType> weightAclOrder(height*width*inputChannels*multiplier);
    unsigned int destinationWeightsChannel;
    unsigned int totalChannels = inputChannels * multiplier;
    unsigned int channelSize = height * width;
    unsigned int inputChannel = 0;

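    // The origin weights have channel index m * inputChannels + i (multiplier major), whereas ACL expects
    // i * multiplier + m (input channel major). E.g. with inputChannels = 3 and multiplier = 2, origin
    // channels 0..5 are written to destination channels 0, 2, 4, 1, 3, 5.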
    for (unsigned int originWeightsChannel = 0; originWeightsChannel < totalChannels; originWeightsChannel++)
    {
        inputChannel = originWeightsChannel % inputChannels;
        destinationWeightsChannel = (originWeightsChannel - inputChannel) / inputChannels + multiplier * inputChannel;

        for (unsigned int i = 0; i < channelSize; i++)
        {
            weightAclOrder[i + destinationWeightsChannel * channelSize] =
                weight[i + originWeightsChannel * channelSize];
        }
    }

    ::memcpy(permuteBuffer, weightAclOrder.data(), weightHandle.GetInfo().GetNumBytes());
    return ConstTensor(weightHandle.GetInfo(), permuteBuffer);
}

TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, DataLayout dataLayout)
{
    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library

    // 1. Permute the weights if necessary
    // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done
    // starting from the current shape of [ M, I, H, W ]
    TensorInfo weightPermutedInfo(weightInfo);
    if (dataLayout == DataLayout::NHWC)
    {
        // The data layout is NHWC, then permute the weights from [ M, I, H, W ] to [ H, W, I, M ]
        PermutationVector permutationVector{ 3, 2, 0, 1 };
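        // The permutation vector maps each source dimension to its destination index: M (dim 0) moves to
        // position 3, I (dim 1) to position 2, H (dim 2) to position 0 and W (dim 3) to position 1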
        weightPermutedInfo = armnnUtils::Permuted(weightInfo, permutationVector);
    }

    // 2. Reshape the weights
    ReshapeWeightsForAcl(weightPermutedInfo, dataLayout);

    // 3. Return the permuted weight info
    return weightPermutedInfo;
}

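// Converts a depthwise convolution weight tensor from ArmNN's [ M, I, H, W ] layout to the layout required by
// the compute library, writing the converted data into permuteBuffer and returning a ConstTensor that wraps it.
// A minimal usage sketch (assuming the caller keeps the buffer alive for as long as the returned tensor is used):
//
//     std::vector<uint8_t> buffer(weightTensor->GetTensorInfo().GetNumBytes());
//     ConstTensor aclWeights = ConvertWeightTensorFromArmnnToAcl(weightTensor, DataLayout::NCHW, buffer.data());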
armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor,
                                                     DataLayout dataLayout,
                                                     void* permuteBuffer)
{
    ARMNN_ASSERT_MSG(weightTensor, "Invalid input tensor");
    ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");

    auto multiplier = weightTensor->GetTensorInfo().GetShape()[0];
    auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library

    // 1. Permute the weights if necessary
    // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done
    // starting from the current shape of [ M, I, H, W ]
    // If no permutation is necessary, leave the permutation vector empty
    PermutationVector permutationVector{};
    if (dataLayout == DataLayout::NHWC)
    {
        // The data layout is NHWC, then permute the weights from [ M, I, H, W ] to [ H, W, I, M ]
        permutationVector = { 3, 2, 0, 1 };
    }
    ConstTensor weightPermuted = PermuteTensor(weightTensor, permutationVector, permuteBuffer);

    // Shuffle the weights data to obtain the channel order needed by ACL
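    // The shuffle is only needed for NCHW weights with both multiplier > 1 and inputChannels > 1: when either
    // is 1 the [ M, I ] and [ I, M ] orderings coincide, and for NHWC the permute above has already placed the
    // channels in [ I, M ] order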
    if (multiplier > 1 && inputChannels > 1 && dataLayout == DataLayout::NCHW)
    {
        switch (weightPermuted.GetDataType())
        {
            case DataType::Float32:
                weightPermuted = ReorderWeightChannelsForAcl<float>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::Float16:
                weightPermuted =
                    ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::QAsymmS8:
            case DataType::QAsymmU8:
                weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
                break;
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            case DataType::QuantizedSymm8PerAxis:
                ARMNN_FALLTHROUGH;
            case DataType::QSymmS8:
                weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer);
                break;
            ARMNN_NO_DEPRECATE_WARN_END
            default:
                break;
        }
    }

    // 2. Reshape the weights
    ReshapeWeightsForAcl(weightPermuted.GetInfo(), dataLayout);

    // 3. Return both the tensor and the allocated storage to ensure that the data stays alive
    return weightPermuted;
}

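// Reverses the order of the lowest numDim bits of a mask. ACL indexes tensor dimensions in the opposite order
// to ArmNN, so a per-dimension bit mask has to have its bits mirrored. E.g. with numDim = 4, a mask of
// 0b0011 (dimensions 0 and 1 set) becomes 0b1100, i.e. 12.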
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
{
    int32_t reversedMask = 0;
    for (unsigned int i = 0; i < armnn::numeric_cast<unsigned int>(numDim); ++i)
    {
        // Check if bit set in mask for each dimension
        int32_t bit = (mask & 1 << i) != 0;
        // Increment the new mask with the bits reversed
        reversedMask += (bit << std::max(numDim-(armnn::numeric_cast<int>(i)+1), 0));
    }

    return reversedMask;
}

} // namespace armnn