//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "ClSoftmaxUint8Workload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"

namespace armnn
{

ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
                                               std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
    , m_SoftmaxLayer(memoryManager)
{
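    // Check that the descriptor carries exactly one input and one output tensor.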
    m_Data.ValidateInputsOutputs("ClSoftmaxUint8Workload", 1, 1);

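    // Obtain the underlying OpenCL tensors from the ArmNN tensor handles.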
    arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

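    // Softmax outputs lie in [0, 1), so this workload only supports outputs quantized
    // with scale = 1/256 and offset = 0; any other quantization is rejected below.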
    const auto outputQuantization = output.info()->quantization_info();

    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }

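    // Configure the underlying CL softmax layer; m_Beta scales the logits before the exponentiation.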
    m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
}

void ClSoftmaxUint8Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");

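    // Run the configured softmax layer on the OpenCL command queue.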
    m_SoftmaxLayer.run();
}

} // namespace armnn