blob: bb56802ba3fff93e8f14eee5366308483a68d990 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
arovir019e53a352018-08-31 15:26:35 +01006#include "ClSoftmaxFloatWorkload.hpp"
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007#include <cl/ClTensorHandle.hpp>
8#include <backendsCommon/CpuTensorHandle.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
Matthew Bentham14e46692018-09-20 15:35:30 +010010#include "ClWorkloadUtils.hpp"
11
telsoa014fcda012018-03-09 14:13:49 +000012namespace armnn
13{
14
arovir019e53a352018-08-31 15:26:35 +010015ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
surmeh013537c2c2018-05-18 16:31:43 +010016 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +010017 : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
surmeh013537c2c2018-05-18 16:31:43 +010018 , m_SoftmaxLayer(memoryManager)
telsoa014fcda012018-03-09 14:13:49 +000019{
arovir019e53a352018-08-31 15:26:35 +010020 m_Data.ValidateInputsOutputs("ClSoftmaxFloatWorkload", 1, 1);
telsoa014fcda012018-03-09 14:13:49 +000021
22 arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
23 arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
24 m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
25}
26
void ClSoftmaxFloatWorkload::Execute() const
{
    // Scoped profiling event so this execution is recorded in ArmNN's CL profiling trace.
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
    // Run the softmax layer configured in the constructor; CHECK_LOCATION() supplies
    // the file/line used to annotate any error raised while running the CL function.
    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}
32
surmeh013537c2c2018-05-18 16:31:43 +010033} //namespace armnn