blob: 606005659f28c369172e15a6016f6c55585ea893 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

arovir019e53a352018-08-31 15:26:35 +01006#include "ClSoftmaxFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +01007#include <backends/cl/ClTensorHandle.hpp>
8#include <backends/CpuTensorHandle.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
Matthew Bentham14e46692018-09-20 15:35:30 +010010#include "ClWorkloadUtils.hpp"
11
telsoa014fcda012018-03-09 14:13:49 +000012namespace armnn
13{
14
arovir019e53a352018-08-31 15:26:35 +010015ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
surmeh013537c2c2018-05-18 16:31:43 +010016 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +010017 : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
surmeh013537c2c2018-05-18 16:31:43 +010018 , m_SoftmaxLayer(memoryManager)
telsoa014fcda012018-03-09 14:13:49 +000019{
arovir019e53a352018-08-31 15:26:35 +010020 m_Data.ValidateInputsOutputs("ClSoftmaxFloatWorkload", 1, 1);
telsoa014fcda012018-03-09 14:13:49 +000021
22 arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
23 arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
24 m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
25}
26
// Runs the pre-configured ACL softmax layer on the tensors bound in the constructor.
void ClSoftmaxFloatWorkload::Execute() const
{
    // Emits a scoped CL profiling event covering the kernel dispatch below.
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxFloatWorkload_Execute");
    m_SoftmaxLayer.run();
}
32
surmeh013537c2c2018-05-18 16:31:43 +010033} //namespace armnn