Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 1 | // |
| 2 | // Copyright © 2017 Arm Ltd. All rights reserved. |
| 3 | // SPDX-License-Identifier: MIT |
| 4 | // |
| 5 | |
Matteo Martincigh | e5b8eb9 | 2019-11-28 15:45:42 +0000 | [diff] [blame] | 6 | #include <backendsCommon/WorkloadUtils.hpp> |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 7 | |
| 8 | namespace armnn |
| 9 | { |
| 10 | |
| 11 | armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor, |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 12 | const PermutationVector& permutationVector, void* permuteBuffer) |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 13 | { |
| 14 | BOOST_ASSERT_MSG(tensor, "Invalid input tensor"); |
| 15 | BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer"); |
| 16 | |
| 17 | TensorInfo tensorInfo = tensor->GetTensorInfo(); |
| 18 | |
| 19 | if (permutationVector.GetSize() > 0) |
| 20 | { |
| 21 | tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector); |
| 22 | armnnUtils::Permute(tensorInfo.GetShape(), permutationVector, |
| 23 | tensor->GetConstTensor<void>(), permuteBuffer, |
| 24 | GetDataTypeSize(tensorInfo.GetDataType())); |
| 25 | } |
| 26 | else |
| 27 | { |
| 28 | ::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes()); |
| 29 | } |
| 30 | |
| 31 | return ConstTensor(tensorInfo, permuteBuffer); |
| 32 | } |
| 33 | |
| 34 | void ReshapeWeightsForAcl(TensorInfo& weightInfo, DataLayout dataLayout) |
| 35 | { |
| 36 | // Reshape the weights in-place |
| 37 | const TensorShape& weightShape = weightInfo.GetShape(); |
| 38 | switch (dataLayout) |
| 39 | { |
| 40 | case DataLayout::NHWC: |
| 41 | // The data layout is NHWC, reshape from [ H, W, I, M ] to [ 1, H, W, I * M ] |
| 42 | weightInfo.SetShape({ 1, |
| 43 | weightShape[0], |
| 44 | weightShape[1], |
| 45 | weightShape[2] * weightShape[3] }); |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 46 | weightInfo.SetShape({ 1, |
| 47 | weightShape[0] * weightShape[1], |
| 48 | weightShape[2], |
| 49 | weightShape[3] }); |
| 50 | break; |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 51 | case DataLayout::NCHW: |
| 52 | default: |
| 53 | // The data layout is NCHW, reshape from [ M, I, H, W ] to [ 1, I * M, H, W, ] |
| 54 | weightInfo.SetShape({ 1, weightShape[0] * weightShape[1], weightShape[2], weightShape[3] }); |
| 55 | break; |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 56 | } |
| 57 | } |
| 58 | |
// Reorders the per-channel weight data of a depthwise convolution so the channel
// ordering matches what the Arm Compute Library expects. The data in permuteBuffer
// is rewritten in place and a ConstTensor viewing that buffer is returned.
//
// The source channels are laid out multiplier-major (channel index = m * I + i);
// the destination is input-channel-major (channel index = i * M + m) — see the
// destinationWeightsChannel computation below.
//
// @param weightHandle  Tensor whose shape/info describe the weights already held
//                      in permuteBuffer.
// @param dataLayout    Layout the weights are currently in (decides which dims
//                      are H/W/I/M).
// @param permuteBuffer Buffer holding the weight data; used as both source and
//                      destination. Must hold weightHandle.GetInfo().GetNumBytes().
template <typename DataType>
ConstTensor ReorderWeightChannelsForAcl(const ConstTensor& weightHandle, DataLayout dataLayout, void* permuteBuffer)
{
    DataType* weight = static_cast<DataType*>(permuteBuffer);
    const TensorShape& weightShape = weightHandle.GetShape();
    unsigned int multiplier;
    unsigned int height;
    unsigned int width;
    unsigned int inputChannels;
    switch (dataLayout)
    {
        case DataLayout::NHWC: //It actually is [ H, W, I, M ]
            height = weightShape[0];
            width = weightShape[1];
            inputChannels = weightShape[2];
            multiplier = weightShape[3];
            break;
        case DataLayout::NCHW: //It actually is [ M, I, H, W ]
        default:
            height = weightShape[2];
            width = weightShape[3];
            inputChannels = weightShape[1];
            multiplier = weightShape[0];
            break;
    }

    // Scratch vector to build the reordered data before copying it back.
    std::vector<DataType> weightAclOrder(height*width*inputChannels*multiplier);
    unsigned int destinationWeightsChannel;
    unsigned int totalChannels = inputChannels * multiplier;
    unsigned int channelSize = height * width;
    unsigned int inputChannel = 0;

    for (unsigned int originWeightsChannel = 0; originWeightsChannel < totalChannels; originWeightsChannel++)
    {
        // originWeightsChannel = m * inputChannels + inputChannel, so this maps
        // (m, i) -> i * multiplier + m, i.e. swaps the channel ordering.
        inputChannel = originWeightsChannel % inputChannels;
        destinationWeightsChannel = (originWeightsChannel - inputChannel) / inputChannels + multiplier * inputChannel;

        // Move one whole H*W plane of weights to its new channel slot.
        for (unsigned int i = 0; i < channelSize; i++)
        {
            weightAclOrder[i + destinationWeightsChannel * channelSize] =
                weight[i + originWeightsChannel * channelSize];
        }
    }

    // Write the reordered data back over the caller's buffer and return a view of it.
    ::memcpy(permuteBuffer, weightAclOrder.data(), weightHandle.GetInfo().GetNumBytes());
    return ConstTensor(weightHandle.GetInfo(), permuteBuffer);
}
| 106 | |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 107 | TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, DataLayout dataLayout) |
| 108 | { |
| 109 | // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either |
| 110 | // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library |
| 111 | |
| 112 | // 1. Permute the weights if necessary |
| 113 | // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done |
| 114 | // starting from the current shape of [ M, I, H, W ] |
| 115 | TensorInfo weightPermutedInfo(weightInfo); |
| 116 | if (dataLayout == DataLayout::NHWC) |
| 117 | { |
| 118 | // The data layout is NHWC, then permute the weights from [ M, I, H, W ] to [ H, W, I, M ] |
| 119 | PermutationVector permutationVector{ 3, 2, 0, 1 }; |
| 120 | weightPermutedInfo = armnnUtils::Permuted(weightInfo, permutationVector); |
| 121 | } |
| 122 | |
| 123 | // 2. Reshape the weights |
| 124 | ReshapeWeightsForAcl(weightPermutedInfo, dataLayout); |
| 125 | |
| 126 | // 3. Return the permuted weight info |
| 127 | return weightPermutedInfo; |
| 128 | } |
| 129 | |
| 130 | armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor, |
| 131 | DataLayout dataLayout, |
| 132 | void* permuteBuffer) |
| 133 | { |
| 134 | BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor"); |
| 135 | BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer"); |
| 136 | |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 137 | auto multiplier = weightTensor->GetTensorInfo().GetShape()[0]; |
| 138 | auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1]; |
| 139 | |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 140 | // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either |
| 141 | // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library |
| 142 | |
| 143 | // 1. Permute the weights if necessary |
| 144 | // If the data layout is NCHW no permutation is necessary, as a reshape to [ 1, I * M, H, W ] can be better done |
| 145 | // starting from the current shape of [ M, I, H, W ] |
| 146 | // If no permutation is necessary, leave the permutation vector empty |
| 147 | PermutationVector permutationVector{}; |
| 148 | if (dataLayout == DataLayout::NHWC) |
| 149 | { |
| 150 | // The data layout is NHWC, then permute the weights from [ M, I, H, W ] to [ H, W, I, M ] |
| 151 | permutationVector = { 3, 2, 0, 1 }; |
| 152 | } |
| 153 | ConstTensor weightPermuted = PermuteTensor(weightTensor, permutationVector, permuteBuffer); |
| 154 | |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 155 | // Shuffle the weights data to obtain the channel order needed used by Acl |
Rob Hughes | 93667b1 | 2019-09-23 16:24:05 +0100 | [diff] [blame] | 156 | if (multiplier > 1 && inputChannels > 1 && dataLayout == DataLayout::NCHW) |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 157 | { |
| 158 | switch (weightPermuted.GetDataType()) |
| 159 | { |
| 160 | case DataType::Float32: |
| 161 | weightPermuted = ReorderWeightChannelsForAcl<float>(weightPermuted, dataLayout, permuteBuffer); |
| 162 | break; |
| 163 | case DataType::Float16: |
| 164 | weightPermuted = |
| 165 | ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer); |
| 166 | break; |
Derek Lamberti | f90c56d | 2020-01-10 17:14:08 +0000 | [diff] [blame] | 167 | case DataType::QAsymmU8: |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 168 | weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer); |
| 169 | break; |
Teresa Charlin | a68d853 | 2019-11-29 13:59:18 +0000 | [diff] [blame] | 170 | case DataType::QuantizedSymm8PerAxis: |
| 171 | weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer); |
| 172 | break; |
Kevin May | 665a964a | 2019-08-21 16:53:50 +0100 | [diff] [blame] | 173 | default: |
| 174 | break; |
| 175 | } |
| 176 | } |
| 177 | |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 178 | // 2. Reshape the weights |
| 179 | ReshapeWeightsForAcl(weightPermuted.GetInfo(), dataLayout); |
| 180 | |
| 181 | // 3. Return both the tensor and the allocated storage to ensure that the data stays alive |
| 182 | return weightPermuted; |
| 183 | } |
| 184 | |
// Reverses the low numDim bits of a strided-slice mask, since the compute library
// indexes dimensions in the opposite order to ArmNN.
//
// @param mask   Bitmask with one bit per dimension (bit i refers to dimension i).
// @param numDim Number of dimensions covered by the mask.
// @return The mask with bits 0..numDim-1 mirrored; 0 when numDim <= 0.
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
{
    int32_t reversedMask = 0;
    // A signed loop index avoids the signed/unsigned round-trips the previous
    // implementation needed (boost::numeric_cast in both directions per iteration).
    for (int32_t i = 0; i < numDim; ++i)
    {
        // Check if bit i is set in the mask for this dimension.
        int32_t bit = (mask & (1 << i)) != 0;
        // Place it at the mirrored position in the new mask.
        reversedMask += (bit << std::max(numDim - (i + 1), 0));
    }

    return reversedMask;
}
| 198 | |
Matteo Martincigh | 747ef82 | 2018-12-18 09:26:39 +0000 | [diff] [blame] | 199 | } // namespace armnn |