//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConstantTestImpl.hpp"

#include <Permute.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

21namespace
22{
23
24template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
25LayerTestResult<T, 4> ConstantTestImpl(
26 armnn::IWorkloadFactory& workloadFactory,
27 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
28 float qScale,
29 int32_t qOffset)
30{
31 constexpr unsigned int inputWidth = 3;
32 constexpr unsigned int inputHeight = 4;
33 constexpr unsigned int inputChannels = 3;
34 constexpr unsigned int inputBatchSize = 2;
35
36 constexpr unsigned int outputWidth = inputWidth;
37 constexpr unsigned int outputHeight = inputHeight;
38 constexpr unsigned int outputChannels = inputChannels;
39 constexpr unsigned int outputBatchSize = inputBatchSize;
40
41 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
42 ArmnnType, qScale, qOffset);
43
44 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
45 ArmnnType, qScale, qOffset);
46
47 // Set quantization parameters if the requested type is a quantized type.
48 if(armnn::IsQuantizedType<T>())
49 {
50 inputTensorInfo.SetQuantizationScale(qScale);
51 inputTensorInfo.SetQuantizationOffset(qOffset);
52 outputTensorInfo.SetQuantizationScale(qScale);
53 outputTensorInfo.SetQuantizationOffset(qOffset);
54 }
55
56 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010057 armnnUtils::QuantizedVector<T>(
58 {
59 // Batch 0, Channel 0
60 235.0f, 46.0f, 178.0f,
61 100.0f, 123.0f, 19.0f,
62 172.0f, 74.0f, 250.0f,
63 6.0f, 195.0f, 80.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010064
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010065 // Batch 0, Channel 1
66 113.0f, 95.0f, 202.0f,
67 77.0f, 114.0f, 71.0f,
68 122.0f, 246.0f, 166.0f,
69 82.0f, 28.0f, 37.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010070
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010071 // Batch 0, Channel 2
72 56.0f, 170.0f, 162.0f,
73 194.0f, 89.0f, 254.0f,
74 12.0f, 209.0f, 200.0f,
75 1.0f, 64.0f, 54.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010076
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010077 // Batch 1, Channel 0
78 67.0f, 90.0f, 49.0f,
79 7.0f, 163.0f, 18.0f,
80 25.0f, 117.0f, 103.0f,
81 247.0f, 59.0f, 189.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010082
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010083 // Batch 1, Channel 1
84 239.0f, 104.0f, 199.0f,
85 17.0f, 124.0f, 153.0f,
86 222.0f, 217.0f, 75.0f,
87 32.0f, 126.0f, 21.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010088
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010089 // Batch 1, Channel 2
90 97.0f, 145.0f, 215.0f,
91 115.0f, 116.0f, 238.0f,
92 226.0f, 16.0f, 132.0f,
93 92.0f, 125.0f, 88.0f,
94 },
95 qScale, qOffset)));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010096
97 LayerTestResult<T, 4> result(outputTensorInfo);
98 result.outputExpected = input;
99
100 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
101
102 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
103 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
104
105 armnn::ConstantQueueDescriptor descriptor;
106 descriptor.m_LayerOutput = &constantTensor;
107
108 armnn::WorkloadInfo info;
109 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
110
111 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
112
113 outputHandle->Allocate();
114
115 workload->PostAllocationConfigure();
116 workload->Execute();
117
118 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
119 return result;
120}
121
122} // anonymous namespace
123
124LayerTestResult<float, 4> ConstantTest(
125 armnn::IWorkloadFactory& workloadFactory,
126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
127{
128 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
129}
130
131LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
132 armnn::IWorkloadFactory& workloadFactory,
133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
134{
135 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
136}
137
138LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
139 armnn::IWorkloadFactory& workloadFactory,
140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
141{
142 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
143}
144
145LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
146 armnn::IWorkloadFactory& workloadFactory,
147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
148{
149 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
150}
151
152LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
153 armnn::IWorkloadFactory& workloadFactory,
154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
155{
156 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
157}