//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/WorkloadUtils.hpp>

namespace armnn
{

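// PermuteTensor writes the (optionally permuted) weight data into the caller-provided
// permuteBuffer and returns a ConstTensor that views that buffer; when the permutation
// vector is empty the data is copied through unchanged.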
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
                                 const PermutationVector& permutationVector, void* permuteBuffer)
{
    BOOST_ASSERT_MSG(tensor, "Invalid input tensor");
    BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");

    TensorInfo tensorInfo = tensor->GetTensorInfo();

    if (permutationVector.GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector);
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector,
                            tensor->GetConstTensor<void>(), permuteBuffer,
                            GetDataTypeSize(tensorInfo.GetDataType()));
    }
    else
    {
        ::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes());
    }

    return ConstTensor(tensorInfo, permuteBuffer);
}

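// For example, depthwise weights with H = 3, W = 3, I = 2 and M = 4 are collapsed by
// ReshapeWeightsForAcl below so that the input-channel and multiplier dimensions merge:
//   NHWC: [ H, W, I, M ] = [ 3, 3, 2, 4 ]  ->  [ 1, H, W, I * M ] = [ 1, 3, 3, 8 ]
//   NCHW: [ M, I, H, W ] = [ 4, 2, 3, 3 ]  ->  [ 1, I * M, H, W ] = [ 1, 8, 3, 3 ]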
void ReshapeWeightsForAcl(TensorInfo& weightInfo, DataLayout dataLayout)
{
    // Reshape the weights in-place
    const TensorShape& weightShape = weightInfo.GetShape();
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            // The data layout is NHWC, reshape from [ H, W, I, M ] to [ 1, H, W, I * M ]
            weightInfo.SetShape({ 1,
                                  weightShape[0],
                                  weightShape[1],
                                  weightShape[2] * weightShape[3] });
            weightInfo.SetShape({ 1,
                                  weightShape[0] * weightShape[1],
                                  weightShape[2],
                                  weightShape[3] });
            break;
        case DataLayout::NCHW:
        default:
            // The data layout is NCHW, reshape from [ M, I, H, W ] to [ 1, I * M, H, W ]
            weightInfo.SetShape({ 1, weightShape[0] * weightShape[1], weightShape[2], weightShape[3] });
            break;
    }
}

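// Worked example of the channel shuffle below: with inputChannels I = 2 and
// multiplier M = 3, ArmNN stores the depthwise channels in the order
//   (m0,i0), (m0,i1), (m1,i0), (m1,i1), (m2,i0), (m2,i1).
// The mapping destination = m + M * i regroups them per input channel,
//   (m0,i0), (m1,i0), (m2,i0), (m0,i1), (m1,i1), (m2,i1),
// which is the channel order the Compute Library kernels consume.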
template <typename DataType>
ConstTensor ReorderWeightChannelsForAcl(const ConstTensor& weightHandle, DataLayout dataLayout, void* permuteBuffer)
{
    DataType* weight = static_cast<DataType*>(permuteBuffer);
    const TensorShape& weightShape = weightHandle.GetShape();
    unsigned int multiplier;
    unsigned int height;
    unsigned int width;
    unsigned int inputChannels;
    switch (dataLayout)
    {
        case DataLayout::NHWC:    // It actually is [ H, W, I, M ]
            height        = weightShape[0];
            width         = weightShape[1];
            inputChannels = weightShape[2];
            multiplier    = weightShape[3];
            break;
        case DataLayout::NCHW:    // It actually is [ M, I, H, W ]
        default:
            height        = weightShape[2];
            width         = weightShape[3];
            inputChannels = weightShape[1];
            multiplier    = weightShape[0];
            break;
    }

    std::vector<DataType> weightAclOrder(height * width * inputChannels * multiplier);
    unsigned int destinationWeightsChannel;
    unsigned int totalChannels = inputChannels * multiplier;
    unsigned int channelSize   = height * width;
    unsigned int inputChannel  = 0;

    for (unsigned int originWeightsChannel = 0; originWeightsChannel < totalChannels; originWeightsChannel++)
    {
        inputChannel = originWeightsChannel % inputChannels;
        destinationWeightsChannel = (originWeightsChannel - inputChannel) / inputChannels + multiplier * inputChannel;

        for (unsigned int i = 0; i < channelSize; i++)
        {
            weightAclOrder[i + destinationWeightsChannel * channelSize] =
                weight[i + originWeightsChannel * channelSize];
        }
    }

    ::memcpy(permuteBuffer, weightAclOrder.data(), weightHandle.GetInfo().GetNumBytes());
    return ConstTensor(weightHandle.GetInfo(), permuteBuffer);
}

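// Illustrative shape flow for the conversion below: starting from ArmNN depthwise
// weights of shape [ M, I, H, W ] = [ 2, 3, 5, 5 ]:
//   NHWC: permute to [ H, W, I, M ] = [ 5, 5, 3, 2 ], then reshape to [ 1, 5, 5, 6 ]
//   NCHW: no permutation, reshape directly to [ 1, I * M, H, W ] = [ 1, 6, 5, 5 ]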
TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, DataLayout dataLayout)
{
    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library

    // 1. Permute the weights if necessary
    // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done
    // starting from the current shape of [ M, I, H, W ]
    TensorInfo weightPermutedInfo(weightInfo);
    if (dataLayout == DataLayout::NHWC)
    {
        // The data layout is NHWC, then permute the weights from [ M, I, H, W ] to [ H, W, I, M ]
        PermutationVector permutationVector{ 3, 2, 0, 1 };
        weightPermutedInfo = armnnUtils::Permuted(weightInfo, permutationVector);
    }

    // 2. Reshape the weights
    ReshapeWeightsForAcl(weightPermutedInfo, dataLayout);

    // 3. Return the permuted weight info
    return weightPermutedInfo;
}

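// Illustrative caller pattern (a sketch, not code from this file): a backend workload
// would typically allocate a scratch buffer sized for the weights and let the function
// below permute/reorder/reshape into it, e.g.
//
//     std::vector<uint8_t> permuteBuffer(weightTensorHandle->GetTensorInfo().GetNumBytes());
//     ConstTensor aclWeights =
//         ConvertWeightTensorFromArmnnToAcl(weightTensorHandle, dataLayout, permuteBuffer.data());
//
// where weightTensorHandle is a hypothetical ConstCpuTensorHandle* owned by the workload.
// The returned ConstTensor points into permuteBuffer, so the buffer must outlive it.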
armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor,
                                                     DataLayout dataLayout,
                                                     void* permuteBuffer)
{
    BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor");
    BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");

    auto multiplier    = weightTensor->GetTensorInfo().GetShape()[0];
    auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library

    // 1. Permute the weights if necessary
    // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done
    // starting from the current shape of [ M, I, H, W ]
    // If no permutation is necessary, leave the permutation vector empty
    PermutationVector permutationVector{};
    if (dataLayout == DataLayout::NHWC)
    {
        // The data layout is NHWC, then permute the weights from [ M, I, H, W ] to [ H, W, I, M ]
        permutationVector = { 3, 2, 0, 1 };
    }
    ConstTensor weightPermuted = PermuteTensor(weightTensor, permutationVector, permuteBuffer);

    // Shuffle the weights data to obtain the channel order needed by Acl
    if (multiplier > 1 && inputChannels > 1 && dataLayout == DataLayout::NCHW)
    {
        switch (weightPermuted.GetDataType())
        {
            case DataType::Float32:
                weightPermuted = ReorderWeightChannelsForAcl<float>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::Float16:
                weightPermuted =
                    ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::QAsymmU8:
                weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
                break;
            case DataType::QuantizedSymm8PerAxis:
                weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer);
                break;
            default:
                break;
        }
    }

    // 2. Reshape the weights
    ReshapeWeightsForAcl(weightPermuted.GetInfo(), dataLayout);

    // 3. Return both the tensor and the allocated storage to ensure that the data stays alive
    return weightPermuted;
}

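// Worked example of the mask conversion below: with numDim = 4 and mask = 0b0011
// (dimensions 0 and 1 set in ArmNN order), each bit is reflected about the middle of
// the dimension range, giving reversedMask = 0b1100 for the compute library's
// reversed dimension ordering.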
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
{
    int32_t reversedMask = 0;
    for (unsigned int i = 0; i < boost::numeric_cast<unsigned int>(numDim); ++i)
    {
        // Check if bit set in mask for each dimension
        int32_t bit = (mask & 1 << i) != 0;
        // Increment the new mask with the bits reversed
        reversedMask += (bit << std::max(numDim - (boost::numeric_cast<int>(i) + 1), 0));
    }

    return reversedMask;
}

} // namespace armnn