blob: d25673b18ba615b5a4843b7cce855fc85e491003 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
#include "FakeQuantizationTestImpl.hpp"

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
16
17LayerTestResult<float, 2> FakeQuantizationTest(
18 armnn::IWorkloadFactory& workloadFactory,
19 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
20{
Derek Lambertic374ff02019-12-10 21:57:35 +000021 boost::ignore_unused(memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010022 constexpr unsigned int width = 2;
23 constexpr unsigned int height = 3;
24
25 const armnn::TensorInfo tensorInfo({height, width },
26 armnn::DataType::Float32);
27
28 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
29 -10.0f, -5.0f,
30 0.0f, 5.0f,
31 10.0f, 10.0f
32 }));
33
34 LayerTestResult<float, 2> ret(tensorInfo);
35
36 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
37 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
38
39 armnn::FakeQuantizationQueueDescriptor data;
40 armnn::WorkloadInfo info;
41
42 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
43 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
44
45 float min = -10.f;
46 float max = 10.f;
47
48 data.m_Parameters.m_Min = min;
49 data.m_Parameters.m_Max = max;
50
51 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
52 armnn::FakeQuantizationQueueDescriptor refData = data;
53 armnn::WorkloadInfo refInfo = info;
54 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
55
56 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
57
58 inputHandle->Allocate();
59 outputHandle->Allocate();
60
61 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
62
63 workload->PostAllocationConfigure();
64 workload->Execute();
65
66 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
67
68 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
69 0.0f, 63.0f,
70 128.0f, 191.0f,
71 255.0f, 255.0f
72 }));
73
74 return ret;
75}