blob: ebdd504a97ff7d0f04ec8cff03e762c13fb5ad5e [file] [log] [blame]
Laurent Carlier749294b2020-06-01 09:03:17 +01001//
Teresa Charlin588cbdf2022-01-19 15:55:37 +00002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007#include <Half.hpp>
Matthew Bentham14e46692018-09-20 15:35:30 +01008
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00009#include <aclCommon/ArmComputeTensorUtils.hpp>
10#include <cl/OpenClTimer.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000011#include <armnn/backends/TensorHandle.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012
Derek Lambertid466a542020-01-22 15:37:29 +000013#include <armnn/Utils.hpp>
14
Matthew Bentham9b3e7382020-02-05 21:39:55 +000015#include <arm_compute/runtime/CL/CLTensor.h>
16#include <arm_compute/runtime/IFunction.h>
Aron Virginas-Tara8e06ed2018-10-19 16:46:15 +010017
18#include <sstream>
19
/// Opens a scoped profiling event on the GpuAcc backend, instrumented with
/// both an OpenCL event timer and a wall-clock timer. No profiling GUID is
/// attached (EmptyOptional is passed in its place).
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::OpenClTimer(), \
                                                  armnn::WallClockTimer())

/// Variant of the macro above that additionally tags the profiling event
/// with the supplied GUID so it can be correlated by external profilers.
#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                  guid, \
                                                  name, \
                                                  armnn::OpenClTimer(), \
                                                  armnn::WallClockTimer())
telsoa014fcda012018-03-09 14:13:49 +000033
34namespace armnn
35{
36
Keith Davis5a64f222021-08-04 10:35:20 +010037inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
38{
39 switch (convolutionMethod)
40 {
41 case arm_compute::ConvolutionMethod::FFT:
42 return "FFT";
43 case arm_compute::ConvolutionMethod::DIRECT:
44 return "Direct";
45 case arm_compute::ConvolutionMethod::GEMM:
46 return "GEMM";
47 case arm_compute::ConvolutionMethod::WINOGRAD:
48 return "Winograd";
49 default:
50 return "Unknown";
51 }
52}
53
/// Copies host-side data into an OpenCL tensor.
///
/// The tensor is mapped into host address space (blocking map), the element
/// data is copied in, and the tensor is unmapped again. The inner braces
/// scope the profiling events so each phase is timed separately.
///
/// @tparam T       Element type of the source buffer.
/// @param dstTensor CL tensor that receives the data.
/// @param srcData   Pointer to the host data to copy; must hold at least as
///                  many elements as the tensor (not checked here).
template <typename T>
void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcData)
{
    {
        // Blocking map: returns only once the tensor memory is host-visible.
        ARMNN_SCOPED_PROFILING_EVENT_CL("MapClTensorForWriting");
        dstTensor.map(true);
    }

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("CopyToClTensor");
        armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor);
    }

    dstTensor.unmap();
}
69
keidav01d74dc912018-12-10 18:16:07 +000070inline auto SetClStridedSliceData(const std::vector<int>& m_begin,
71 const std::vector<int>& m_end,
72 const std::vector<int>& m_stride)
73{
74 arm_compute::Coordinates starts;
75 arm_compute::Coordinates ends;
76 arm_compute::Coordinates strides;
77
78 unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
79
80 for (unsigned int i = 0; i < num_dims; i++) {
81 unsigned int revertedIndex = num_dims - i - 1;
82
83 starts.set(i, static_cast<int>(m_begin[revertedIndex]));
84 ends.set(i, static_cast<int>(m_end[revertedIndex]));
85 strides.set(i, static_cast<int>(m_stride[revertedIndex]));
86 }
87
88 return std::make_tuple(starts, ends, strides);
89}
90
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000091inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
92 const std::vector<unsigned int>& m_size)
93{
94 // This function must translate the size vector given to an end vector
95 // expected by the ACL NESlice workload
96 arm_compute::Coordinates starts;
97 arm_compute::Coordinates ends;
98
99 unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
100
101 // For strided slices, we have the relationship size = (end - begin) / stride
102 // For slice, we assume stride to be a vector of all ones, yielding the formula
103 // size = (end - begin) therefore we know end = size + begin
104 for (unsigned int i = 0; i < num_dims; i++)
105 {
106 unsigned int revertedIndex = num_dims - i - 1;
107
108 starts.set(i, static_cast<int>(m_begin[revertedIndex]));
109 ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
110 }
111
112 return std::make_tuple(starts, ends);
113}
114
Matthew Bentham785df502018-09-21 10:29:58 +0100115inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
James Conroy1f58f032021-04-27 17:13:27 +0100116 const ConstTensorHandle* handle)
telsoa01c577f2c2018-08-31 09:22:23 +0100117{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100118 ARMNN_ASSERT(handle);
Matthew Benthamca6616c2018-09-21 15:16:53 +0100119
120 armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
telsoa01c577f2c2018-08-31 09:22:23 +0100121 switch(handle->GetTensorInfo().GetDataType())
122 {
123 case DataType::Float16:
Matthew Benthamca6616c2018-09-21 15:16:53 +0100124 CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<armnn::Half>());
telsoa01c577f2c2018-08-31 09:22:23 +0100125 break;
126 case DataType::Float32:
Matthew Benthamca6616c2018-09-21 15:16:53 +0100127 CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
telsoa01c577f2c2018-08-31 09:22:23 +0100128 break;
Derek Lambertif90c56d2020-01-10 17:14:08 +0000129 case DataType::QAsymmU8:
Matthew Benthamca6616c2018-09-21 15:16:53 +0100130 CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
Matthew Bentham785df502018-09-21 10:29:58 +0100131 break;
Narumol Prangnawarat33d2c782020-11-13 18:00:23 +0000132 case DataType::QAsymmS8:
Derek Lambertid466a542020-01-22 15:37:29 +0000133 case DataType::QSymmS8:
Keith Davis899f64f2019-11-26 16:01:18 +0000134 CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>());
135 break;
Ryan OShea2323af42020-05-13 16:36:19 +0100136 case DataType::QSymmS16:
137 CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int16_t>());
138 break;
Matthew Bentham785df502018-09-21 10:29:58 +0100139 case DataType::Signed32:
Matthew Benthamca6616c2018-09-21 15:16:53 +0100140 CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
Matthew Bentham785df502018-09-21 10:29:58 +0100141 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100142 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100143 ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
telsoa01c577f2c2018-08-31 09:22:23 +0100144 }
145};
146
Aron Virginas-Tara8e06ed2018-10-19 16:46:15 +0100147inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
148{
149 std::stringstream message;
150 message << "CL error: " << clError.what() << ". Error code: " << clError.err();
151
152 return RuntimeException(message.str(), location);
153}
154
155inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
156{
157 try
158 {
159 function.run();
160 }
161 catch (cl::Error& error)
162 {
163 throw WrapClError(error, location);
164 }
165}
166
/// Maps the idx-th output tensor of a workload payload and returns a typed
/// pointer to the mapped memory.
///
/// @tparam DataType    Element type to view the tensor memory as.
/// @tparam PayloadType Workload data type exposing an m_Outputs container.
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    return reinterpret_cast<DataType*>(data.m_Outputs[idx]->Map());
}
173
telsoa014fcda012018-03-09 14:13:49 +0000174} //namespace armnn