//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FakeQuantizationTestImpl.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
16LayerTestResult<float, 2> FakeQuantizationTest(
17 armnn::IWorkloadFactory& workloadFactory,
Finn Williams87020072020-08-26 16:19:15 +010018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
19 armnn::ITensorHandleFactory* tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010020{
Jan Eilers8eb25602020-03-09 12:13:48 +000021 IgnoreUnused(memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010022 constexpr unsigned int width = 2;
23 constexpr unsigned int height = 3;
24
25 const armnn::TensorInfo tensorInfo({height, width },
26 armnn::DataType::Float32);
27
28 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
29 -10.0f, -5.0f,
30 0.0f, 5.0f,
31 10.0f, 10.0f
32 }));
33
34 LayerTestResult<float, 2> ret(tensorInfo);
35
Finn Williams87020072020-08-26 16:19:15 +010036 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory->CreateTensorHandle(tensorInfo);
37 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory->CreateTensorHandle(tensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010038
39 armnn::FakeQuantizationQueueDescriptor data;
40 armnn::WorkloadInfo info;
41
42 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
43 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
44
45 float min = -10.f;
46 float max = 10.f;
47
48 data.m_Parameters.m_Min = min;
49 data.m_Parameters.m_Max = max;
50
51 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
52 armnn::FakeQuantizationQueueDescriptor refData = data;
53 armnn::WorkloadInfo refInfo = info;
54 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
55
56 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
57
58 inputHandle->Allocate();
59 outputHandle->Allocate();
60
61 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
62
63 workload->PostAllocationConfigure();
64 workload->Execute();
65
66 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
67
68 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
69 0.0f, 63.0f,
70 128.0f, 191.0f,
71 255.0f, 255.0f
72 }));
73
74 return ret;
75}