//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefArgMinMaxWorkload.hpp"

#include "ArgMinMax.hpp"
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
#include "Profiling.hpp"

namespace armnn
{
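// The constructor only forwards the queue descriptor and workload info to BaseWorkload;
// all of the actual computation happens in Execute().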
RefArgMinMaxWorkload::RefArgMinMaxWorkload(
        const ArgMinMaxQueueDescriptor& descriptor,
        const WorkloadInfo& info)
        : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info) {}

void RefArgMinMaxWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefArgMinMaxWorkload_Execute");

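    // Look up shape and data type information for the input tensor.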
    const TensorInfo &inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);

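    // Map the input tensor and wrap it in a Decoder so that any supported input
    // data type (e.g. float or quantized) can be read back as float values.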
    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
    Decoder<float> &decoder = *decoderPtr;

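    // Look up shape and data type information for the output tensor.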
    const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);

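    // ArgMin/ArgMax produces tensor indices, which the reference backend writes
    // out as 32-bit signed integers.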
    int32_t* output = GetOutputTensorData<int32_t>(0, m_Data);

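    // Run the reference ArgMin/ArgMax implementation along the requested axis;
    // m_Function selects whether the minimum or the maximum is searched for.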
    ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
              m_Data.m_Parameters.m_Axis);
}

} //namespace armnn