//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Half.hpp>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/OpenClTimer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <arm_compute/runtime/CL/CLFunctions.h>

#include <sstream>

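// Wraps a scoped profiling event for the GpuAcc backend, recording both OpenCL
// kernel time (via OpenClTimer) and wall-clock time for the enclosing scope.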
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                  name, \
                                                  armnn::OpenClTimer(), \
                                                  armnn::WallClockTimer())

namespace armnn
{

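// Maps the destination CL tensor for writing, copies srcData into it using the
// common ACL tensor utilities, then unmaps it. Mapping and copying are profiled
// as separate events.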
template <typename T>
void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcData)
{
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("MapClTensorForWriting");
        dstTensor.map(true);
    }

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL("CopyToClTensor");
        armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor);
    }

    dstTensor.unmap();
}

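// Translates begin/end/stride vectors (in armnn dimension order) into the
// reversed arm_compute::Coordinates expected by the ACL strided slice function.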
inline auto SetClStridedSliceData(const std::vector<int>& m_begin,
                                  const std::vector<int>& m_end,
                                  const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}

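// Translates begin/size vectors into the reversed start/end coordinates used by
// the ACL slice function, computing end = begin + size for each dimension.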
inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
                           const std::vector<unsigned int>& m_size)
{
    // This function must translate the size vector given to an end vector
    // expected by the ACL CLSlice workload
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // For strided slices, we have the relationship size = (end - begin) / stride.
    // For slice, we assume stride to be a vector of all ones, yielding the formula
    // size = (end - begin); therefore we know end = size + begin.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}

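// Allocates the CL tensor's backing memory and fills it with the constant data
// held by the given tensor handle, dispatching on the handle's data type.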
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                             const ConstCpuTensorHandle* handle)
{
    BOOST_ASSERT(handle);

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
    switch(handle->GetTensorInfo().GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<armnn::Half>());
            break;
        case DataType::Float32:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
            break;
        case DataType::QAsymmU8:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
            break;
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        case DataType::QuantizedSymm8PerAxis:
            ARMNN_FALLTHROUGH;
        case DataType::QSymmS8:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>());
            break;
        ARMNN_NO_DEPRECATE_WARN_END
        case DataType::Signed32:
            CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
    }
}

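// Converts a cl::Error into an armnn::RuntimeException carrying the OpenCL error
// message, the error code and the supplied source location.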
inline RuntimeException WrapClError(const cl::Error& clError, const CheckLocation& location)
{
    std::stringstream message;
    message << "CL error: " << clError.what() << ". Error code: " << clError.err();

    return RuntimeException(message.str(), location);
}

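// Runs an arm_compute::IFunction, rethrowing any cl::Error as an armnn
// RuntimeException annotated with the supplied source location.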
inline void RunClFunction(arm_compute::IFunction& function, const CheckLocation& location)
{
    try
    {
        function.run();
    }
    catch (cl::Error& error)
    {
        throw WrapClError(error, location);
    }
}
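
// Illustrative usage from a workload's Execute() method (sketch only; the
// workload class and its m_SliceFunction member are hypothetical names):
//
//     void ClSliceWorkload::Execute() const
//     {
//         ARMNN_SCOPED_PROFILING_EVENT_CL("ClSliceWorkload_Execute");
//         RunClFunction(m_SliceFunction, CHECK_LOCATION());
//     }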

} //namespace armnn