//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Half.hpp>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/OpenClTimer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <arm_compute/runtime/CL/CLFunctions.h>

#include <sstream>

#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                  name, \
                                                  armnn::OpenClTimer(), \
                                                  armnn::WallClockTimer())
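
// Usage sketch for the macro above: placed at the top of a scope, it times that scope
// with both an OpenCL event timer and a wall-clock timer under the GpuAcc backend.
// MyClWorkload and m_SliceFunction are illustrative names, not part of this header:
//
//     void MyClWorkload::Execute() const
//     {
//         ARMNN_SCOPED_PROFILING_EVENT_CL("MyClWorkload_Execute");
//         RunClFunction(m_SliceFunction, CHECK_LOCATION());
//     }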

namespace armnn
{

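// Copies host-side data into an arm_compute CL tensor using the map/copy/unmap pattern:
// the tensor is first mapped (blocking) so the host can write to it, the data is copied
// in, and the tensor is then unmapped to hand it back to the GPU queue. The map and copy
// phases are profiled separately via ARMNN_SCOPED_PROFILING_EVENT_CL.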
template <typename T>
void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcData)
{
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("MapClTensorForWriting");
        dstTensor.map(true);
    }

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("CopyToClTensor");
        armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor);
    }

    dstTensor.unmap();
}

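// Translates ArmNN begin/end/stride vectors into the arm_compute::Coordinates triple
// consumed by the ACL strided-slice function. ArmNN and ACL order tensor dimensions in
// opposite directions, hence the index reversal in the loop below. Worked example with
// illustrative values: begin = {0, 1, 2}, end = {1, 3, 4}, stride = {1, 1, 2} yields
// starts = (2, 1, 0), ends = (4, 3, 1), strides = (2, 1, 1).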
inline auto SetClStridedSliceData(const std::vector<int>& m_begin,
                                  const std::vector<int>& m_end,
                                  const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}

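// As above, but for plain (unit-stride) slice: translates ArmNN begin/size vectors into
// the start/end coordinate pair expected by the ACL slice function, again reversing the
// dimension order. Worked example with illustrative values: begin = {0, 1},
// size = {2, 3} yields starts = (1, 0), ends = (4, 2), since end = begin + size.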
inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
                           const std::vector<unsigned int>& m_size)
{
    // This function must translate the size vector given into the end vector
    // expected by the ACL CLSlice workload
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // For strided slices we have the relationship size = (end - begin) / stride.
    // For plain slice the stride is implicitly a vector of all ones, so the formula
    // reduces to size = end - begin, and therefore end = begin + size.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}

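// Allocates storage for clTensor, then fills it from the given constant tensor handle,
// dispatching on the handle's data type; types without a matching case trip the
// assertion in the default branch.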
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                             const ConstCpuTensorHandle* handle)
{
    BOOST_ASSERT(handle);

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
    switch(handle->GetTensorInfo().GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<armnn::Half>());
            break;
        case DataType::Float32:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
            break;
        case DataType::QAsymmU8:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
            break;
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        case DataType::QuantizedSymm8PerAxis:
            ARMNN_FALLTHROUGH;
        case DataType::QSymmS8:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>());
            break;
        ARMNN_NO_DEPRECATE_WARN_END
        case DataType::Signed32:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
    }
}

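// Converts a cl::Error thrown by the OpenCL runtime into an armnn::RuntimeException,
// preserving the error message, the numeric CL error code and the call site.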
inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
{
    std::stringstream message;
    message << "CL error: " << clError.what() << ". Error code: " << clError.err();

    return RuntimeException(message.str(), location);
}

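// Runs an ACL function, rethrowing any cl::Error as an armnn::RuntimeException via
// WrapClError so that CL failures surface through ArmNN's own exception hierarchy.
// Callers typically pass CHECK_LOCATION() as the second argument.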
inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
{
    try
    {
        function.run();
    }
    catch (cl::Error& error)
    {
        throw WrapClError(error, location);
    }
}

} // namespace armnn