//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/backends/Workload.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTimer.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <Half.hpp>

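// Convenience macros for profiling a scoped region of a Neon workload on the CpuAcc backend,
// instrumented with armnn::NeonTimer and armnn::WallClockTimer. The _GUID variant additionally
// tags the event with a profiling GUID. Illustrative usage from inside a workload's Execute()
// (the event name below is a made-up example):
//
//     ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonExampleWorkload_Execute");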
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())

#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())

using namespace armnn::armcomputetensorutils;

namespace armnn
{

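// Returns a human-readable name for the given ACL convolution method (e.g. for logging/profiling).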
inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
{
    switch (convolutionMethod)
    {
        case arm_compute::ConvolutionMethod::FFT:
            return "FFT";
        case arm_compute::ConvolutionMethod::DIRECT:
            return "Direct";
        case arm_compute::ConvolutionMethod::GEMM:
            return "GEMM";
        case arm_compute::ConvolutionMethod::WINOGRAD:
            return "Winograd";
        default:
            return "Unknown";
    }
}

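// Initialises the destination ACL tensor's storage and copies the raw source buffer into it
// via the ArmCompute tensor utilities.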
template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}

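// Copies the data referenced by an ITensorHandle into the given ACL tensor, dispatching on the
// data type recorded in tensorInfo; throws for unsupported types.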
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                           TensorInfo tensorInfo,
                                           const ITensorHandle* handle)
{
    ARMNN_ASSERT(handle);

    switch(tensorInfo.GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::Half*>(handle->Map()));
            break;
        case DataType::Float32:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
            break;
        case DataType::QAsymmU8:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const uint8_t*>(handle->Map()));
            break;
        case DataType::QSymmS8:
        case DataType::QAsymmS8:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const int8_t*>(handle->Map()));
            break;
        case DataType::Signed32:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const int32_t*>(handle->Map()));
            break;
        case DataType::QSymmS16:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const int16_t*>(handle->Map()));
            break;
        case DataType::BFloat16:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::BFloat16*>(handle->Map()));
            break;
        default:
            // Throw exception; assertion not called in release build.
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
    }
};

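// Overload for constant tensor handles; the data type is taken from the handle's own TensorInfo.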
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                           const ConstTensorHandle* handle)
{
    ARMNN_ASSERT(handle);

    switch(handle->GetTensorInfo().GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
            break;
        case DataType::Float32:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
            break;
        case DataType::QAsymmU8:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
            break;
        case DataType::QSymmS8:
        case DataType::QAsymmS8:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
            break;
        case DataType::Signed32:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
            break;
        case DataType::QSymmS16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
            break;
        case DataType::BFloat16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::BFloat16>());
            break;
        default:
            // Throw exception; assertion not called in release build.
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
    }
};

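// Converts begin/end/stride vectors into arm_compute::Coordinates for a strided slice, reversing
// the dimension order to match ACL's innermost-first coordinate convention.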
inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
                                    const std::vector<int>& m_end,
                                    const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}

inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
                             const std::vector<unsigned int>& m_size)
{
    // This function must translate the size vector given to an end vector
    // expected by the ACL NESlice workload.
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // For strided slices, we have the relationship size = (end - begin) / stride.
    // For slice, we assume stride to be a vector of all ones, yielding the formula
    // size = (end - begin); therefore end = begin + size.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}

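// Maps the idx-th output tensor handle of the given workload data and returns it as a typed pointer.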
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

} //namespace armnn