//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefQuantizeWorkload.hpp"

#include "RefWorkloadUtils.hpp"

#include <armnn/TypesUtils.hpp>

12
13namespace armnn
14{
15
16namespace
17{
18
Keith Davis5e51cd82020-01-29 16:52:59 +000019void QuantizeImpl(Decoder<float>& in, Encoder<float>& out, size_t numValues)
Derek Lamberti5f400d62019-03-25 15:41:58 +000020{
Keith Davis5e51cd82020-01-29 16:52:59 +000021 for (unsigned int i = 0; i < numValues; i++)
Derek Lamberti5f400d62019-03-25 15:41:58 +000022 {
Keith Davis5e51cd82020-01-29 16:52:59 +000023 in[i];
24 out[i];
25 out.Set(in.Get());
Derek Lamberti5f400d62019-03-25 15:41:58 +000026 }
27}
28
29} //namespace
30
// Constructs the workload; caches the element count of the first input
// tensor so Execute() does not have to re-query it per invocation.
RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info)
    : RefBaseWorkload(descriptor, info)
    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
{
}
36
// Synchronous entry point: quantizes the tensors bound to this workload's
// own queue descriptor (m_Data).
void RefQuantizeWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
Derek Lamberti5f400d62019-03-25 15:41:58 +000041
Matthew Sloyan2d213a72022-06-30 17:13:04 +010042void RefQuantizeWorkload::ExecuteAsync(ExecutionData& executionData)
Finn Williamsb8181f72021-04-07 10:23:21 +010043{
Matthew Sloyan2d213a72022-06-30 17:13:04 +010044 WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
45 Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
Finn Williamsb8181f72021-04-07 10:23:21 +010046}
47
48void RefQuantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
49{
50 std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
51 std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
52
53 QuantizeImpl(*inputDecoder, *outputEncoder, m_NumElements);
Derek Lamberti5f400d62019-03-25 15:41:58 +000054}
55
56} //namespace armnn