blob: 860a8353d69ab5d746720d9ee434b08ed94615d8 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007#include <backendsCommon/Workload.hpp>
8#include <aclCommon/ArmComputeTensorUtils.hpp>
9#include <neon/NeonTensorHandle.hpp>
10#include <neon/NeonTimer.hpp>
11#include <backendsCommon/CpuTensorHandle.hpp>
telsoa014fcda012018-03-09 14:13:49 +000012
Derek Lambertid466a542020-01-22 15:37:29 +000013#include <armnn/Utils.hpp>
14
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010015#include <Half.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010017#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
telsoa01c577f2c2018-08-31 09:22:23 +010018 ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
19 name, \
Nina Drozd69851b52018-09-21 18:42:09 +010020 armnn::NeonTimer(), \
21 armnn::WallClockTimer())
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010022
23using namespace armnn::armcomputetensorutils;
24
25namespace armnn
26{
27
// Allocates the destination ACL tensor and fills it with the given source data.
// The tensor must already carry its TensorInfo (shape/data type); srcData must
// point to at least as many elements of T as the tensor holds — not checked here.
// NOTE(review): the initialise-then-copy order is required — copying into an
// unallocated ACL tensor is invalid.
template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}
34
35inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
36 const ConstCpuTensorHandle* handle)
37{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010038 ARMNN_ASSERT(handle);
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010039
40 switch(handle->GetTensorInfo().GetDataType())
41 {
42 case DataType::Float16:
43 CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
44 break;
45 case DataType::Float32:
46 CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
47 break;
Derek Lambertif90c56d2020-01-10 17:14:08 +000048 case DataType::QAsymmU8:
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010049 CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
50 break;
Derek Lambertid466a542020-01-22 15:37:29 +000051 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar21fc28b2019-11-26 14:04:54 +000052 case DataType::QuantizedSymm8PerAxis:
Derek Lambertid466a542020-01-22 15:37:29 +000053 ARMNN_FALLTHROUGH;
54 case DataType::QSymmS8:
Sadik Armagane5d0b932020-04-09 15:48:44 +010055 case DataType::QAsymmS8:
Aron Virginas-Tar21fc28b2019-11-26 14:04:54 +000056 CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
57 break;
Derek Lambertid466a542020-01-22 15:37:29 +000058 ARMNN_NO_DEPRECATE_WARN_END
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010059 case DataType::Signed32:
60 CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
61 break;
62 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010063 ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +010064 }
65};
66
FinnWilliamsArm1fa19192019-08-02 17:26:31 +010067inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
68 const std::vector<int>& m_end,
69 const std::vector<int>& m_stride)
70{
71 arm_compute::Coordinates starts;
72 arm_compute::Coordinates ends;
73 arm_compute::Coordinates strides;
74
75 unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
76
77 for (unsigned int i = 0; i < num_dims; i++)
78 {
79 unsigned int revertedIndex = num_dims - i - 1;
80
81 starts.set(i, static_cast<int>(m_begin[revertedIndex]));
82 ends.set(i, static_cast<int>(m_end[revertedIndex]));
83 strides.set(i, static_cast<int>(m_stride[revertedIndex]));
84 }
85
86 return std::make_tuple(starts, ends, strides);
87}
88
josh minor036f02d2019-11-15 14:53:22 -060089inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
90 const std::vector<unsigned int>& m_size)
91{
92 // This function must translate the size vector given to an end vector
93 // expected by the ACL NESlice workload
94 arm_compute::Coordinates starts;
95 arm_compute::Coordinates ends;
96
97 unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
98
99 // For strided slices, we have the relationship size = (end - begin) / stride
100 // For slice, we assume stride to be a vector of all ones, yielding the formula
101 // size = (end - begin) therefore we know end = size + begin
102 for (unsigned int i = 0; i < num_dims; i++)
103 {
104 unsigned int revertedIndex = num_dims - i - 1;
105
106 starts.set(i, static_cast<int>(m_begin[revertedIndex]));
107 ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
108 }
109
110 return std::make_tuple(starts, ends);
111}
112
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100113} //namespace armnn