//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FakeQuantizationTestImpl.hpp"

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

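// Builds and runs a FakeQuantization workload on a 2x3 Float32 tensor and returns both the
// actual output and the expected output (the inputs mapped from [min, max] onto [0, 255]).
// The memoryManager argument is not used by this test.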
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int width  = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);

    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
          0.0f,  5.0f,
         10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

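    // Create the backend-specific input and output tensor handles.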
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

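    // Set up the workload descriptor and bind the input and output handles to it.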
    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

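    // Quantization range: inputs in [-10, 10] are mapped onto the quantized range [0, 255].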
    float min = -10.0f;
    float max =  10.0f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

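    // Wrap the expected-output buffer in a passthrough handle and point the copied
    // reference descriptor's output at it.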
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

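    // Create the FakeQuantization workload from the descriptor and workload info.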
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

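    // Allocate the tensor handles and copy the input data into the input handle.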
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

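    // Configure and run the workload.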
    workload->PostAllocationConfigure();
    workload->Execute();

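    // Copy the result back out of the output handle.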
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

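    // Expected output: the input values mapped from [-10, 10] onto [0, 255]
    // (e.g. -10 -> 0, 0 -> 128, 10 -> 255).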
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
          0.0f,  63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));

    return ret;
}