//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonSoftmaxUint8Workload.hpp"

namespace armnn
{

NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info,
                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
    , m_SoftmaxLayer(memoryManager)
{
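    // Softmax takes exactly one input tensor and produces exactly one output tensor.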
    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);

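    // Extract the underlying Arm Compute Library tensors from the ArmNN tensor handles.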
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    const auto outputQuantization = output.info()->quantization_info();

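    // Softmax output lies in [0, 1), so the NEON uint8 implementation only supports
    // an output quantization of scale 1/256 with zero offset.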
    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }

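    // Configure the ACL softmax layer, passing through the beta (exponent scaling) parameter.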
    m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
}

void NeonSoftmaxUint8Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");

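    // Run the softmax layer configured in the constructor.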
    m_SoftmaxLayer.run();
}

} //namespace armnn