//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConstantTestImpl.hpp"

#include <Permute.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{
23template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
24LayerTestResult<T, 4> ConstantTestImpl(
25 armnn::IWorkloadFactory& workloadFactory,
26 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
27 float qScale,
28 int32_t qOffset)
29{
30 constexpr unsigned int inputWidth = 3;
31 constexpr unsigned int inputHeight = 4;
32 constexpr unsigned int inputChannels = 3;
33 constexpr unsigned int inputBatchSize = 2;
34
35 constexpr unsigned int outputWidth = inputWidth;
36 constexpr unsigned int outputHeight = inputHeight;
37 constexpr unsigned int outputChannels = inputChannels;
38 constexpr unsigned int outputBatchSize = inputBatchSize;
39
40 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
41 ArmnnType, qScale, qOffset);
42
43 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
44 ArmnnType, qScale, qOffset);
45
46 // Set quantization parameters if the requested type is a quantized type.
47 if(armnn::IsQuantizedType<T>())
48 {
49 inputTensorInfo.SetQuantizationScale(qScale);
50 inputTensorInfo.SetQuantizationOffset(qOffset);
51 outputTensorInfo.SetQuantizationScale(qScale);
52 outputTensorInfo.SetQuantizationOffset(qOffset);
53 }
54
55 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
56 QuantizedVector<T>(qScale, qOffset, {
57 // Batch 0, Channel 0
58 235.0f, 46.0f, 178.0f,
59 100.0f, 123.0f, 19.0f,
60 172.0f, 74.0f, 250.0f,
61 6.0f, 195.0f, 80.0f,
62
63 // Batch 0, Channel 1
64 113.0f, 95.0f, 202.0f,
65 77.0f, 114.0f, 71.0f,
66 122.0f, 246.0f, 166.0f,
67 82.0f, 28.0f, 37.0f,
68
69 // Batch 0, Channel 2
70 56.0f, 170.0f, 162.0f,
71 194.0f, 89.0f, 254.0f,
72 12.0f, 209.0f, 200.0f,
73 1.0f, 64.0f, 54.0f,
74
75 // Batch 1, Channel 0
76 67.0f, 90.0f, 49.0f,
77 7.0f, 163.0f, 18.0f,
78 25.0f, 117.0f, 103.0f,
79 247.0f, 59.0f, 189.0f,
80
81 // Batch 1, Channel 1
82 239.0f, 104.0f, 199.0f,
83 17.0f, 124.0f, 153.0f,
84 222.0f, 217.0f, 75.0f,
85 32.0f, 126.0f, 21.0f,
86
87 // Batch 1, Channel 2
88 97.0f, 145.0f, 215.0f,
89 115.0f, 116.0f, 238.0f,
90 226.0f, 16.0f, 132.0f,
91 92.0f, 125.0f, 88.0f,
92 })));
93
94 LayerTestResult<T, 4> result(outputTensorInfo);
95 result.outputExpected = input;
96
97 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
98
99 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
100 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
101
102 armnn::ConstantQueueDescriptor descriptor;
103 descriptor.m_LayerOutput = &constantTensor;
104
105 armnn::WorkloadInfo info;
106 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
107
108 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
109
110 outputHandle->Allocate();
111
112 workload->PostAllocationConfigure();
113 workload->Execute();
114
115 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
116 return result;
117}

} // anonymous namespace

121LayerTestResult<float, 4> ConstantTest(
122 armnn::IWorkloadFactory& workloadFactory,
123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
124{
125 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
126}
127
128LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
129 armnn::IWorkloadFactory& workloadFactory,
130 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
131{
132 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
133}
134
135LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
136 armnn::IWorkloadFactory& workloadFactory,
137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
138{
139 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
140}
141
142LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
143 armnn::IWorkloadFactory& workloadFactory,
144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
145{
146 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
147}
148
149LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
150 armnn::IWorkloadFactory& workloadFactory,
151 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
152{
153 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
154}