//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClQuantizeWorkload.hpp"
#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

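// Asks the Arm Compute Library whether CLQuantizationLayer can handle the given
// input and output tensor shapes and data types, without constructing a workload.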
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo& input,
                                               const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    return arm_compute::CLQuantizationLayer::validate(&aclInputInfo,
                                                      &aclOutputInfo);
}

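// Pulls the underlying OpenCL tensors out of the input and output tensor handles
// and configures the Arm Compute Library quantization function to run between them.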
ClQuantizeWorkload::ClQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<QuantizeQueueDescriptor>(descriptor, info)
{
    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_Layer.configure(&input, &output);
}

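// Runs the configured CLQuantizationLayer inside a scoped CL profiling event;
// RunClFunction tags any OpenCL error with this call site via CHECK_LOCATION().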
void ClQuantizeWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClQuantizeWorkload_Execute");
    RunClFunction(m_Layer, CHECK_LOCATION());
}

} //namespace armnn