//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "WorkloadTestUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <cstdint>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

namespace
{

22template<typename T, std::size_t Dim>
23LayerTestResult<T, Dim> DebugTestImpl(
24 armnn::IWorkloadFactory& workloadFactory,
25 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
26 armnn::TensorInfo& inputTensorInfo,
27 armnn::TensorInfo& outputTensorInfo,
28 std::vector<float>& inputData,
29 std::vector<float>& outputExpectedData,
30 armnn::DebugQueueDescriptor descriptor,
31 const std::string expectedStringOutput,
32 const float qScale = 1.0f,
33 const int32_t qOffset = 0)
34{
35 if(armnn::IsQuantizedType<T>())
36 {
37 inputTensorInfo.SetQuantizationScale(qScale);
38 inputTensorInfo.SetQuantizationOffset(qOffset);
39
40 outputTensorInfo.SetQuantizationScale(qScale);
41 outputTensorInfo.SetQuantizationOffset(qOffset);
42 }
43
44 boost::multi_array<T, Dim> input =
45 MakeTensor<T, Dim>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
46
47 LayerTestResult<T, Dim> ret(outputTensorInfo);
48 ret.outputExpected =
49 MakeTensor<T, Dim>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
50
51 std::unique_ptr<armnn::ITensorHandle> inputHandle =
52 workloadFactory.CreateTensorHandle(inputTensorInfo);
53
54 std::unique_ptr<armnn::ITensorHandle> outputHandle =
55 workloadFactory.CreateTensorHandle(outputTensorInfo);
56
57 armnn::WorkloadInfo info;
58 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
59 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
60
61 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDebug(descriptor, info);
62
63 inputHandle->Allocate();
64 outputHandle->Allocate();
65
66 CopyDataToITensorHandle(inputHandle.get(), input.data());
67
68 std::ostringstream oss;
69 std::streambuf* coutStreambuf = std::cout.rdbuf();
70 std::cout.rdbuf(oss.rdbuf());
71
72 ExecuteWorkload(*workload, memoryManager);
73
74 std::cout.rdbuf(coutStreambuf);
75
76 BOOST_TEST(oss.str() == expectedStringOutput);
77
78 CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
79
80 return ret;
81}
83template <typename T>
84LayerTestResult<T, 4> Debug4DTest(
85 armnn::IWorkloadFactory& workloadFactory,
86 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
87{
88 armnn::TensorInfo inputTensorInfo;
89 armnn::TensorInfo outputTensorInfo;
90
91 unsigned int inputShape[] = {1, 2, 2, 3};
92 unsigned int outputShape[] = {1, 2, 2, 3};
93
94 armnn::DebugQueueDescriptor desc;
95 desc.m_Parameters.m_LayerName = "TestOutput";
96 desc.m_Parameters.m_SlotIndex = 1;
97
98 inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
99 outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
100
101 std::vector<float> input = std::vector<float>(
102 {
103 1.0f, 2.0f, 3.0f,
104 4.0f, 5.0f, 6.0f,
105 7.0f, 8.0f, 9.0f,
106 10.0f, 11.0f, 12.0f,
107 });
108
109 std::vector<float> outputExpected = std::vector<float>(
110 {
111 1.0f, 2.0f, 3.0f,
112 4.0f, 5.0f, 6.0f,
113 7.0f, 8.0f, 9.0f,
114 10.0f, 11.0f, 12.0f,
115 });
116
117 const std::string expectedStringOutput =
118 "{ \"layer\": \"TestOutput\","
119 " \"outputSlot\": 1,"
120 " \"shape\": [1, 2, 2, 3],"
121 " \"min\": 1, \"max\": 12,"
122 " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
123
124 return DebugTestImpl<T, 4>(workloadFactory,
125 memoryManager,
126 inputTensorInfo,
127 outputTensorInfo,
128 input,
129 outputExpected,
130 desc,
131 expectedStringOutput);
132}
134template <typename T>
135LayerTestResult<T, 3> Debug3DTest(
136 armnn::IWorkloadFactory& workloadFactory,
137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
138{
139 armnn::TensorInfo inputTensorInfo;
140 armnn::TensorInfo outputTensorInfo;
141
142 unsigned int inputShape[] = {3, 3, 1};
143 unsigned int outputShape[] = {3, 3, 1};
144
145 armnn::DebugQueueDescriptor desc;
146 desc.m_Parameters.m_LayerName = "TestOutput";
147
148 inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
149 outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
150
151 std::vector<float> input = std::vector<float>(
152 {
153 1.0f, 2.0f, 3.0f,
154 4.0f, 5.0f, 6.0f,
155 7.0f, 8.0f, 9.0f,
156 });
157
158 std::vector<float> outputExpected = std::vector<float>(
159 {
160 1.0f, 2.0f, 3.0f,
161 4.0f, 5.0f, 6.0f,
162 7.0f, 8.0f, 9.0f,
163 });
164
165 const std::string expectedStringOutput =
166 "{ \"layer\": \"TestOutput\","
167 " \"outputSlot\": 0,"
168 " \"shape\": [3, 3, 1],"
169 " \"min\": 1, \"max\": 9,"
170 " \"data\": [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]] }\n";
171
172 return DebugTestImpl<T, 3>(workloadFactory,
173 memoryManager,
174 inputTensorInfo,
175 outputTensorInfo,
176 input,
177 outputExpected,
178 desc,
179 expectedStringOutput);
180}
182template <typename T>
183LayerTestResult<T, 2> Debug2DTest(
184 armnn::IWorkloadFactory& workloadFactory,
185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
186{
187 armnn::TensorInfo inputTensorInfo;
188 armnn::TensorInfo outputTensorInfo;
189
190 unsigned int inputShape[] = {2, 2};
191 unsigned int outputShape[] = {2, 2};
192
193 armnn::DebugQueueDescriptor desc;
194 desc.m_Parameters.m_LayerName = "TestOutput";
195
196 inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
197 outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
198
199 std::vector<float> input = std::vector<float>(
200 {
201 1.0f, 2.0f,
202 3.0f, 4.0f,
203 });
204
205 std::vector<float> outputExpected = std::vector<float>(
206 {
207 1.0f, 2.0f,
208 3.0f, 4.0f,
209 });
210
211 const std::string expectedStringOutput =
212 "{ \"layer\": \"TestOutput\","
213 " \"outputSlot\": 0,"
214 " \"shape\": [2, 2],"
215 " \"min\": 1, \"max\": 4,"
216 " \"data\": [[1, 2], [3, 4]] }\n";
217
218 return DebugTestImpl<T, 2>(workloadFactory,
219 memoryManager,
220 inputTensorInfo,
221 outputTensorInfo,
222 input,
223 outputExpected,
224 desc,
225 expectedStringOutput);
226}
228template <typename T>
229LayerTestResult<T, 1> Debug1DTest(
230 armnn::IWorkloadFactory& workloadFactory,
231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
232{
233 armnn::TensorInfo inputTensorInfo;
234 armnn::TensorInfo outputTensorInfo;
235
236 unsigned int inputShape[] = {4};
237 unsigned int outputShape[] = {4};
238
239 armnn::DebugQueueDescriptor desc;
240 desc.m_Parameters.m_LayerName = "TestOutput";
241
242 inputTensorInfo = armnn::TensorInfo(1, inputShape, armnn::GetDataType<T>());
243 outputTensorInfo = armnn::TensorInfo(1, outputShape, armnn::GetDataType<T>());
244
245 std::vector<float> input = std::vector<float>(
246 {
247 1.0f, 2.0f, 3.0f, 4.0f,
248 });
249
250 std::vector<float> outputExpected = std::vector<float>(
251 {
252 1.0f, 2.0f, 3.0f, 4.0f,
253 });
254
255 const std::string expectedStringOutput =
256 "{ \"layer\": \"TestOutput\","
257 " \"outputSlot\": 0,"
258 " \"shape\": [4],"
259 " \"min\": 1, \"max\": 4,"
260 " \"data\": [1, 2, 3, 4] }\n";
261
262 return DebugTestImpl<T, 1>(workloadFactory,
263 memoryManager,
264 inputTensorInfo,
265 outputTensorInfo,
266 input,
267 outputExpected,
268 desc,
269 expectedStringOutput);
270}

} // anonymous namespace