Laurent Carlier | 749294b | 2020-06-01 09:03:17 +0100 | [diff] [blame] | 1 | // |
Matthew Sloyan | 2d213a7 | 2022-06-30 17:13:04 +0100 | [diff] [blame] | 2 | // Copyright © 2022 Arm Ltd and Contributors. All rights reserved. |
David Beck | ecb56cd | 2018-09-05 12:52:57 +0100 | [diff] [blame] | 3 | // SPDX-License-Identifier: MIT |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 4 | // |
Aron Virginas-Tar | c9cc804 | 2018-11-01 16:15:57 +0000 | [diff] [blame] | 5 | |
Aron Virginas-Tar | d4f0fea | 2019-04-09 14:08:06 +0100 | [diff] [blame] | 6 | #include <ResolveType.hpp> |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 7 | |
Colm Donelan | 0c47974 | 2021-12-10 12:43:54 +0000 | [diff] [blame] | 8 | #include "WorkloadUtils.hpp" |
| 9 | |
| 10 | #include <armnn/backends/MemCopyWorkload.hpp> |
| 11 | #include <armnn/backends/TensorHandle.hpp> |
Matteo Martincigh | e5b8eb9 | 2019-11-28 15:45:42 +0000 | [diff] [blame] | 12 | |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 13 | #include <armnn/utility/PolymorphicDowncast.hpp> |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 14 | |
Aron Virginas-Tar | c9cc804 | 2018-11-01 16:15:57 +0000 | [diff] [blame] | 15 | #include <cstring> |
| 16 | |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 17 | namespace armnn |
| 18 | { |
| 19 | |
| 20 | namespace |
| 21 | { |
| 22 | |
| 23 | template <typename SrcTensorHandleType, typename DstTensorHandleType> |
| 24 | void GatherTensorHandlePairs(const MemCopyQueueDescriptor& descriptor, |
| 25 | std::vector<std::pair<SrcTensorHandleType*, DstTensorHandleType*>>& tensorHandlePairs) |
| 26 | { |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 27 | const unsigned int numInputs = static_cast<unsigned int>(descriptor.m_Inputs.size()); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 28 | tensorHandlePairs.reserve(numInputs); |
| 29 | |
| 30 | for (unsigned int i = 0; i < numInputs; ++i) |
| 31 | { |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 32 | SrcTensorHandleType* const srcTensorHandle = PolymorphicDowncast<SrcTensorHandleType*>( |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 33 | descriptor.m_Inputs[i]); |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 34 | DstTensorHandleType* const dstTensorHandle = PolymorphicDowncast<DstTensorHandleType*>( |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 35 | descriptor.m_Outputs[i]); |
| 36 | |
| 37 | tensorHandlePairs.emplace_back(srcTensorHandle, dstTensorHandle); |
| 38 | } |
| 39 | } |
| 40 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 41 | } //namespace |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 42 | |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 43 | |
/// Constructs the workload and resolves the descriptor's raw ITensorHandle
/// pointers into typed (source, destination) pairs once, up front.
///
/// @param descriptor queue descriptor carrying the input/output tensor handles.
/// @param info       workload info forwarded unchanged to BaseWorkload.
CopyMemGenericWorkload::CopyMemGenericWorkload(const MemCopyQueueDescriptor& descriptor,
                                               const WorkloadInfo& info)
    : BaseWorkload<MemCopyQueueDescriptor>(descriptor, info)
{
    // Cache the downcast handle pairs in m_TensorHandlePairs so Execute()
    // can iterate them directly without re-resolving on every invocation.
    GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
}
| 50 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 51 | void CopyMemGenericWorkload::Execute() const |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 52 | { |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 53 | ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CopyMemGeneric_Execute"); |
| 54 | |
| 55 | auto copyFunc = [](void* dst, const void* src, size_t size) |
| 56 | { |
| 57 | memcpy(dst, src, size); |
| 58 | }; |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 59 | |
| 60 | for (const auto& pair : m_TensorHandlePairs) |
| 61 | { |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 62 | CopyTensorContentsGeneric(pair.first, pair.second, copyFunc); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 63 | } |
| 64 | } |
| 65 | |
Matthew Sloyan | 2d213a7 | 2022-06-30 17:13:04 +0100 | [diff] [blame] | 66 | void CopyMemGenericWorkload::ExecuteAsync(ExecutionData& executionData) |
Mike Kelly | 386ff1a | 2021-03-29 15:04:50 +0100 | [diff] [blame] | 67 | { |
| 68 | ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CopyMemGeneric_Execute_WorkingMemDescriptor"); |
Matthew Sloyan | 2d213a7 | 2022-06-30 17:13:04 +0100 | [diff] [blame] | 69 | |
| 70 | WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); |
Mike Kelly | 386ff1a | 2021-03-29 15:04:50 +0100 | [diff] [blame] | 71 | std::vector<TensorHandlePair> tensorHandlePairs; |
Matthew Sloyan | 2d213a7 | 2022-06-30 17:13:04 +0100 | [diff] [blame] | 72 | |
| 73 | GatherTensorHandlePairs(*workingMemDescriptor, tensorHandlePairs); |
Mike Kelly | 386ff1a | 2021-03-29 15:04:50 +0100 | [diff] [blame] | 74 | |
| 75 | auto copyFunc = [](void* dst, const void* src, size_t size) |
| 76 | { |
| 77 | memcpy(dst, src, size); |
| 78 | }; |
| 79 | |
| 80 | for (const auto& pair : tensorHandlePairs) |
| 81 | { |
| 82 | CopyTensorContentsGeneric(pair.first, pair.second, copyFunc); |
| 83 | } |
| 84 | } |
| 85 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 86 | } //namespace armnn |