//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "NeonSoftmaxUint8Workload.hpp"

namespace armnn
{
NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
{
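    // Softmax takes exactly one input tensor and produces exactly one output tensor.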
    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);

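    // Extract the Arm Compute Library (ACL) tensors from ArmNN's Neon tensor handles
    // so the ACL softmax layer can be configured directly on them.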
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    const auto outputQuantization = output.info()->quantization_info();

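    // Softmax outputs values in [0, 1], so the backend only supports an output
    // quantization with a fixed scale of 1/256 and a zero offset; reject anything else.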
    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }

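    // Configure the ACL softmax layer, passing through the beta (exponent scaling)
    // parameter from the workload descriptor.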
    m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
}

void NeonSoftmaxUint8Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSoftmaxUint8Workload_Execute");
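    // run() executes the NEON softmax kernels that were configured in the constructor.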
    m_SoftmaxLayer.run();
}
} //namespace armnn