//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonSplitterWorkload.hpp"

#include <arm_compute/runtime/NEON/functions/NESplit.h>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>

#include "NeonWorkloadUtils.hpp"

namespace armnn
{

using namespace armcomputetensorutils;

namespace
{
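// The Arm Compute Library orders tensor dimensions with dimension 0 as the innermost
// (fastest-changing) one, the reverse of ArmNN, so the ArmNN split axis is flipped here.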
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
{
    return (numDimensions - splitAxis) - 1;
}

} //namespace

arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
                                                 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                                 unsigned int splitAxis)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);

    size_t numOutputs = outputs.size();

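    // aclOutputs owns the converted TensorInfo objects and aclOutputPtr holds raw pointers
    // into it for the ACL validate() call; reserving both up front keeps those pointers
    // stable while the vectors are filled below.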
    std::vector<arm_compute::TensorInfo> aclOutputs;
    aclOutputs.reserve(numOutputs);

    std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
    aclOutputPtr.reserve(numOutputs);

    for (size_t i = 0u; i < outputs.size(); ++i)
    {
        aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
        aclOutputPtr.emplace_back(&aclOutputs.back());
    }

    unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
    return arm_compute::NESplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
}

NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<SplitterQueueDescriptor>(descriptor, info)
{
    bool allOutputsAreSubtensors = true;

    // Check that all outputs are sub-tensors
    for (auto output : m_Data.m_Outputs)
    {
        if (output && !output->GetParent())
        {
            // A non sub-tensor output was found, so the split function has to be executed
            allOutputsAreSubtensors = false;
            break;
        }
    }

    if (allOutputsAreSubtensors)
    {
        // Can skip configuring the split function since it's not executed
        return;
    }

    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();

    std::vector<arm_compute::ITensor *> aclOutputs;
    for (auto output : m_Data.m_Outputs)
    {
        arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor();
        aclOutputs.emplace_back(&aclOutput);
    }

    // Create the layer function
    std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit());

    // Configure input and output tensors
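    // NESplit splits along a single axis, so exactly one split dimension must be
    // derivable from the descriptor; anything else is rejected here.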
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
    if (splitAxis.size() != 1)
    {
        throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
    }

    unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
    layer->configure(&input, aclOutputs, aclAxis);

    // Prepare
    layer->prepare();
    m_Layer.reset(layer.release());
}

void NeonSplitterWorkload::Execute() const
{
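    // m_Layer is only created when the outputs could not be mapped onto sub-tensors of
    // the input; when it is null the split is already realised by the tensor views and
    // no work is needed at execution time.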
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSplitterWorkload_Execute");
        m_Layer->run();
    }
}

} //namespace armnn