//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <BFloat16.hpp>
#include <Half.hpp>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/OpenClTimer.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/runtime/IFunction.h>

#include <sstream>

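/// Scoped profiling event for the GpuAcc backend: times the enclosing scope under
/// the given name with both an OpenCL kernel timer and a wall-clock timer.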
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::OpenClTimer(), \
                                                  armnn::WallClockTimer())

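/// As ARMNN_SCOPED_PROFILING_EVENT_CL, but additionally tags the event with the
/// given profiling guid instead of an EmptyOptional.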
#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                  guid, \
                                                  name, \
                                                  armnn::OpenClTimer(), \
                                                  armnn::WallClockTimer())

namespace armnn
{

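/// Returns a human-readable name for the convolution method chosen by the
/// Compute Library, e.g. for logging; unrecognised values map to "Unknown".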
inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
{
    switch (convolutionMethod)
    {
        case arm_compute::ConvolutionMethod::FFT:
            return "FFT";
        case arm_compute::ConvolutionMethod::DIRECT:
            return "Direct";
        case arm_compute::ConvolutionMethod::GEMM:
            return "GEMM";
        case arm_compute::ConvolutionMethod::WINOGRAD:
            return "Winograd";
        default:
            return "Unknown";
    }
}

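/// Copies srcData into dstTensor. The CLTensor is mapped (blocking) before the copy
/// and unmapped afterwards; the map and the copy are each profiled separately.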
template <typename T>
void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcData)
{
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("MapClTensorForWriting");
        dstTensor.map(true);
    }

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("CopyToClTensor");
        armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor);
    }

    dstTensor.unmap();
}

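/// Translates ArmNN StridedSlice begin/end/stride vectors into the Coordinates
/// triple consumed by the ACL strided slice function. The dimension order is
/// reversed because ArmNN orders dimensions outermost-first, while ACL Coordinates
/// treat index 0 as the innermost dimension.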
inline auto SetClStridedSliceData(const std::vector<int>& m_begin,
                                  const std::vector<int>& m_end,
                                  const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}

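/// Translates ArmNN Slice begin/size vectors into the (starts, ends) Coordinates
/// pair consumed by the ACL slice function, reversing the dimension order as above.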
inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
                           const std::vector<unsigned int>& m_size)
{
    // This function must translate the given size vector into the end vector
    // expected by the ACL CLSlice workload.
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // For strided slices, we have the relationship size = (end - begin) / stride.
    // For Slice, the stride is implicitly a vector of ones, so the formula reduces
    // to size = end - begin, and therefore end = begin + size.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}

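/// Allocates clTensor and fills it with the constant data owned by handle,
/// dispatching on the handle's DataType; throws for unsupported types.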
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                             const ConstTensorHandle* handle)
{
    ARMNN_ASSERT(handle);

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
    switch (handle->GetTensorInfo().GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<armnn::Half>());
            break;
        case DataType::Float32:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
            break;
        case DataType::QAsymmU8:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
            break;
        case DataType::QAsymmS8:
        case DataType::QSymmS8:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>());
            break;
        case DataType::QSymmS16:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int16_t>());
            break;
        case DataType::Signed32:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
            break;
        case DataType::BFloat16:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<armnn::BFloat16>());
            break;
        default:
            // Throw an exception here: the assertion above is compiled out in release builds.
            throw Exception("Unexpected tensor type during InitializeArmComputeClTensorData().");
    }
}

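/// Wraps a cl::Error in an armnn::RuntimeException that carries the OpenCL error
/// message and error code along with the source location of the failure.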
inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
{
    std::stringstream message;
    message << "CL error: " << clError.what() << ". Error code: " << clError.err();

    return RuntimeException(message.str(), location);
}

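/// Runs an ACL function, converting any cl::Error it throws into an
/// armnn::RuntimeException via WrapClError. Typical call site (illustrative only;
/// m_Layer is a hypothetical workload member):
///     RunClFunction(m_Layer, CHECK_LOCATION());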
inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
{
    try
    {
        function.run();
    }
    catch (cl::Error& error)
    {
        throw WrapClError(error, location);
    }
}

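/// Maps the idx-th output tensor of a workload's data payload and returns a typed
/// pointer to it; the matching Unmap() is left to the caller.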
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

} // namespace armnn