//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "TestUtils.hpp"

#include <Graph.hpp>
#include <Network.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <doctest/doctest.h>

#include <utility>

using namespace armnn;

namespace
{

using namespace std;

// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
template<typename Workload>
std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer,
                                               const IWorkloadFactory& factory,
                                               const ModelOptions& modelOptions = {})
{
    std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory);
    CHECK_MESSAGE(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
                  "Cannot convert to derived class");
    std::string reasonIfUnsupported;
    layer.SetBackendId(factory.GetBackendId());
    CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
    return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}
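
// Example (illustrative only; RefActivationWorkload stands in for whichever
// backend-specific workload type a given test expects):
//
//     auto workload = MakeAndCheckWorkload<RefActivationWorkload>(*layer, factory);
//
// On success the returned unique_ptr owns the workload, downcast to the requested type.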

// Helper function to create tensor handles for workloads, assuming they all use the same factory.
void CreateTensorHandles(armnn::Graph& graph,
                         armnn::IWorkloadFactory& factory)
{
    TensorHandleFactoryRegistry tmpRegistry;
    for (auto&& layer : graph.TopologicalSort())
    {
        layer->CreateTensorHandles(tmpRegistry, factory);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////
// The following functions are called by backendsCommon/test/CreateWorkload*.cpp
// They build very simple graphs, and then create a workload.
// Some checks are performed on the workload to ensure parameters have been passed correctly.
// They return the created workloads so that backend-specific checks can be performed.
/////////////////////////////////////////////////////////////////////////////////////////////
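
// A typical backend test drives one of these helpers directly. A minimal sketch
// (RefWorkloadFactory and RefActivationWorkload are the reference backend's types,
// used here purely as an example):
//
//     Graph graph;
//     RefWorkloadFactory factory;
//     auto workload = CreateActivationWorkloadTest<RefActivationWorkload,
//                                                  armnn::DataType::Float32>(factory, graph);
//     // ... extra, backend-specific checks on workload->GetData() ...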

template <typename ActivationWorkload, armnn::DataType DataType>
std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ActivationDescriptor layerDesc;
    layerDesc.m_Function = ActivationFunction::ReLu;
    layerDesc.m_A = 3.5f;
    layerDesc.m_B = -10.0f;

    ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({1, 1}, DataType);

    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, factory);

    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
    CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
    CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::ReLu));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFactory& factory,
                                                            armnn::Graph& graph)
{
    // Creates the layer we're testing.
    Layer* const layer = graph.AddLayer<LayerType>("layer");

    // Creates extra layers.
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3}, DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);

    DescriptorType queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 2);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template<typename WorkloadType,
         typename DescriptorType,
         armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                    armnn::Graph& graph)
{
    // Creates the layer we're testing.
    SubtractionLayer* const layer = graph.AddLayer<SubtractionLayer>("layer");

    auto activationDesc = std::make_shared<ActivationDescriptor>();
    activationDesc->m_A = 10.0f;
    activationDesc->m_B = 5.0f;
    activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;

    layer->SetAdditionalInfoForObject(activationDesc);

    // Creates extra layers.
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3}, DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Check that the additional information can be queried from the layer
    std::shared_ptr<ActivationDescriptor>
        activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();

    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);

    DescriptorType queueDescriptor = workload->GetData();

    const ActivationDescriptor* queueDescBlobPtr =
        queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
    IgnoreUnused(queueDescBlobPtr);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    CHECK(queueDescriptor.m_Inputs.size() == 2);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    return workload;
}

template<typename WorkloadType,
         typename DescriptorType,
         armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph)
{
    // Creates the layer we're testing.
    MultiplicationLayer* const layer = graph.AddLayer<MultiplicationLayer>("layer");

    auto activationDesc = std::make_shared<ActivationDescriptor>();
    activationDesc->m_A = 10.0f;
    activationDesc->m_B = 5.0f;
    activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;

    layer->SetAdditionalInfoForObject(activationDesc);

    // Creates extra layers.
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3}, DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Check that the additional information can be queried from the layer
    std::shared_ptr<ActivationDescriptor>
        activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();

    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);

    DescriptorType queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 2);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    const ActivationDescriptor* queueDescBlobPtr =
        queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
    IgnoreUnused(queueDescBlobPtr);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    return workload; // Returns so we can do extra, backend-specific tests.
}

template<typename WorkloadType,
         typename DescriptorType,
         armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph)
{
    // Creates the layer we're testing.
    AdditionLayer* const layer = graph.AddLayer<AdditionLayer>("layer");

    auto activationDesc = std::make_shared<ActivationDescriptor>();
    activationDesc->m_A = 10.0f;
    activationDesc->m_B = 5.0f;
    activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;

    layer->SetAdditionalInfoForObject(activationDesc);

    // Creates extra layers.
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3}, DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Check that the additional information can be queried from the layer
    std::shared_ptr<ActivationDescriptor>
        activationDescPtr = layer->template GetAdditionalInformation<ActivationDescriptor>();

    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);

    DescriptorType queueDescriptor = workload->GetData();
    const ActivationDescriptor* queueDescBlobPtr =
        queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
    IgnoreUnused(queueDescBlobPtr);
    CHECK(queueDescriptor.m_Inputs.size() == 2);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    return workload;
}

template <typename WorkloadType,
          typename DescriptorType,
          armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph,
                                                                 armnn::UnaryOperation op)
{
    ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op);
    Layer* const layer = graph.AddLayer<armnn::ElementwiseUnaryLayer>(desc, "layer");

    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    armnn::TensorInfo tensorInfo({ 2, 3 }, DataType);
    Connect(input, layer, tensorInfo, 0, 0);
    Connect(layer, output, tensorInfo, 0, 0);
    CreateTensorHandles(graph, factory);

    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
    DescriptorType queueDescriptor = workload->GetData();

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    return workload;
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
    TensorShape tensorShape;
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            tensorShape = { 2, 4, 4, 3 };
            break;
        case DataLayout::NCHW:
        default:
            tensorShape = { 2, 3, 4, 4 };
    }

    // Creates the layer we're testing.
    BatchNormalizationDescriptor layerDesc;
    layerDesc.m_Eps = 0.05f;
    layerDesc.m_DataLayout = dataLayout;

    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");

    armnn::TensorInfo weightInfo({3}, DataType);
    layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo(tensorShape, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlobWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
    TensorShape tensorShape;
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            tensorShape = { 2, 4, 4, 3 };
            break;
        case DataLayout::NCHW:
        default:
            tensorShape = { 2, 3, 4, 4 };
    }

    // Creates the layer we're testing.
    BatchNormalizationDescriptor layerDesc;
    layerDesc.m_Eps = 0.05f;
    layerDesc.m_DataLayout = dataLayout;

    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");

    armnn::TensorInfo weightInfo({3}, DataType);
    layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();

    auto activationDesc = std::make_shared<ActivationDescriptor>();
    activationDesc->m_A = 10.0f;
    activationDesc->m_B = 5.0f;
    activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;

    layer->SetAdditionalInfoForObject(activationDesc);

    // Check that the additional information can be queried from the layer
    std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo(tensorShape, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
    IgnoreUnused(queueDescBlobPtr);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph,
                                                                       DataLayout dataLayout = DataLayout::NCHW,
                                                                       const ModelOptions& modelOptions = {})
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_DataLayout = dataLayout;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};

    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
    layer->m_Bias   = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
    CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
    CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
    CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
           TensorInfo({2}, GetBiasDataType(DataType))));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template<typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlobWorkloadTest(
    armnn::IWorkloadFactory& factory,
    armnn::Graph& graph,
    DataLayout dataLayout = DataLayout::NCHW,
    const ModelOptions& modelOptions = {})
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_DataLayout = dataLayout;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};

    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
    layer->m_Bias   = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    auto activationDesc = std::make_shared<ActivationDescriptor>();
    activationDesc->m_A = 10.0f;
    activationDesc->m_B = 5.0f;
    activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;

    layer->SetAdditionalInfoForObject(activationDesc);

    // Check that the additional information can be queried from the layer
    std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();

    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
    IgnoreUnused(queueDescBlobPtr);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
    CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
    CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
    CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
           TensorInfo({2}, GetBiasDataType(DataType))));
    CHECK(queueDescriptor.m_Inputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory,
                                                                               armnn::Graph& graph,
                                                                               DataLayout dataLayout = DataLayout::NCHW,
                                                                               const ModelOptions& modelOptions = {})
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 0;
    layerDesc.m_PadRight = 0;
    layerDesc.m_PadTop = 0;
    layerDesc.m_PadBottom = 0;
    layerDesc.m_StrideX = 1;
    layerDesc.m_StrideY = 1;
    layerDesc.m_BiasEnabled = false;
    layerDesc.m_DataLayout = dataLayout;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    TensorShape weightShape = TensorShape{32, 32, 3, 3};
    TensorShape inputShape  = TensorShape{1, 32, 149, 149};
    TensorShape outputShape = TensorShape{1, 32, 147, 147};

    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
    layer->m_Bias   = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
    CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0);
    CHECK(queueDescriptor.m_Parameters.m_PadRight == 0);
    CHECK(queueDescriptor.m_Parameters.m_PadTop == 0);
    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0);
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename LstmWorkload>
std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // This parameter setting is for withCifgWithPeepholeNoProjection
    LstmDescriptor layerDesc;
    layerDesc.m_ActivationFunc = 4;
    layerDesc.m_ClippingThresCell = 0.0f;
    layerDesc.m_ClippingThresProj = 0.0f;
    layerDesc.m_CifgEnabled = true;
    layerDesc.m_PeepholeEnabled = true;
    layerDesc.m_ProjectionEnabled = false;

    LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
    unsigned int batchSize = 2;
    unsigned int inputSize = 2;
    unsigned int numUnits = 4;
    unsigned int outputSize = 4;

    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));

    layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
    layer->m_BasicParameters.m_InputToCellWeights->Allocate();
    layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
    layer->m_BasicParameters.m_ForgetGateBias->Allocate();
    layer->m_BasicParameters.m_CellBias->Allocate();
    layer->m_BasicParameters.m_OutputGateBias->Allocate();

    if (layerDesc.m_PeepholeEnabled)
    {
        layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
        layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
    }

    // create input and output layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
    Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
    Layer* const output = graph.AddLayer<OutputLayer>(3, "output");

    // connect up
    armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
                                                DataType::Float32);
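    // With CIFG (coupled input and forget gate) enabled, the input gate is derived
    // from the forget gate, so the scratch buffer only needs room for 3 gate
    // activations per unit instead of 4.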
    Connect(input, layer, lstmTensorInfo1, 0, 0);
    Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
    Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
    Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
    Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
    Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
    Connect(layer, output, lstmTensorInfo3, 3, 0);

    CreateTensorHandles(graph, factory);

    // make the workload and check it
    auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, factory);
    LstmQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
    CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
    CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
    CHECK(queueDescriptor.m_Inputs.size() == 3);
    CHECK(queueDescriptor.m_Outputs.size() == 4);

    CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
                                                                                 DataType::Float32)));
    CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
                                                                           DataType::Float32)));
    CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
    return workload;
}

template <typename QuantizedLstmWorkload>
std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph)
{
    auto layer = graph.AddLayer<QuantizedLstmLayer>("quantizedLstmlayer");
    unsigned int numBatches = 2;
    unsigned int inputSize = 2;
    unsigned int outputSize = 4;

    // Scale/Offset for input/output, cellState In/Out, weights, bias
    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;
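
    // ArmNN quantization is affine: real = scale * (quantized - offset). For example,
    // the input/output scale of 1/128 with offset 128 maps the QAsymmU8 range [0, 255]
    // onto roughly [-1.0, 1.0).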

    // Weights and bias tensor and quantization info
    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QAsymmU8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QAsymmU8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize},
                               armnn::DataType::Signed32,
                               biasScale,
                               biasOffset);

    // Weights and bias
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
            std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
            std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
    layer->m_QuantizedLstmParameters.m_InputToCellWeights =
            std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
            std::make_unique<ScopedTensorHandle>(inputWeightsInfo);

    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);

    layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
    layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
    layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
    layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);

    // Allocate weights and bias
    layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate();
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights->Allocate();
    layer->m_QuantizedLstmParameters.m_InputToCellWeights->Allocate();
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights->Allocate();

    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->Allocate();
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Allocate();

    layer->m_QuantizedLstmParameters.m_InputGateBias->Allocate();
    layer->m_QuantizedLstmParameters.m_ForgetGateBias->Allocate();
    layer->m_QuantizedLstmParameters.m_CellBias->Allocate();
    layer->m_QuantizedLstmParameters.m_OutputGateBias->Allocate();

    // Create input and output layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(1, "cellStateIn");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(2, "outputStateIn");

    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(0, "cellStateOut");
    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");

    // Input/output tensor info and quantization info
    armnn::TensorInfo inputInfo({numBatches, inputSize},
                                armnn::DataType::QAsymmU8,
                                inputOutputScale,
                                inputOutputOffset);

    armnn::TensorInfo cellStateInfo({numBatches, outputSize},
                                    armnn::DataType::QSymmS16,
                                    cellStateScale,
                                    cellStateOffset);

    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
                                      armnn::DataType::QAsymmU8,
                                      inputOutputScale,
                                      inputOutputOffset);

    // Connect input/output slots
    Connect(input, layer, inputInfo, 0, 0);
    Connect(cellStateIn, layer, cellStateInfo, 0, 1);
    Connect(outputStateIn, layer, outputStateInfo, 0, 2);

    Connect(layer, cellStateOut, cellStateInfo, 0, 0);
    Connect(layer, outputStateOut, outputStateInfo, 1, 0);

    CreateTensorHandles(graph, factory);

    // Create workload and check layer support
    auto workload = MakeAndCheckWorkload<QuantizedLstmWorkload>(*layer, factory);
    QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();

    // Validate input/output sizes
    CHECK(queueDescriptor.m_Inputs.size() == 3);
    CHECK(queueDescriptor.m_Outputs.size() == 2);

    // Validate weight tensor info
    CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
    CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
    CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
    CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));

    CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
    CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
    CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
    CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));

    CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
    CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
    CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
    CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));

    return workload;
}

template <typename QLstmWorkload>
std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory& factory,
                                                       armnn::Graph& graph)
{
    QLstmDescriptor layerDesc;
    layerDesc.m_CifgEnabled = true;
    layerDesc.m_PeepholeEnabled = false;
    layerDesc.m_ProjectionEnabled = false;
    layerDesc.m_LayerNormEnabled = true;

    layerDesc.m_CellClip = 0.0f;
    layerDesc.m_ProjectionClip = 0.0f;

    layerDesc.m_HiddenStateZeroPoint = 0;
    layerDesc.m_HiddenStateScale = 0.007f;

    layerDesc.m_InputIntermediateScale = 0.007059f;
    layerDesc.m_ForgetIntermediateScale = 0.007812f;
    layerDesc.m_CellIntermediateScale = 0.007059f;
    layerDesc.m_OutputIntermediateScale = 0.007812f;

    QLstmLayer* const layer = graph.AddLayer<QLstmLayer>(layerDesc, "qLstm");

    unsigned int numBatches = 2;
    unsigned int inputSize = 4;
    unsigned int numUnits = 4;
    unsigned int outputSize = 4;

    // Scale/Offset quantization info
    float inputScale = 0.0078125f;
    int32_t inputOffset = 0;

    // if (!projectionEnabled) outputScale == hiddenStateScale
    float outputScale = layerDesc.m_HiddenStateScale;
    int32_t outputOffset = layerDesc.m_HiddenStateZeroPoint;

    float cellStateScale = 3.05176e-05f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00784314f;
    int32_t weightsOffset = 0;

    float layerNormScale = 3.05182e-05f;
    int32_t layerNormOffset = 0;

    float biasScale = layerNormScale / 1024;
    int32_t biasOffset = 0;

    // Weights and bias tensor and quantization info
    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QSymmS8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QSymmS8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);

    armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);

    // Create and allocate tensors
    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);

    layer->m_BasicParameters.m_RecurrentToForgetWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
    layer->m_BasicParameters.m_RecurrentToCellWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
            std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);

    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);

    layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
            std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
    layer->m_LayerNormParameters.m_CellLayerNormWeights =
            std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
    layer->m_LayerNormParameters.m_OutputLayerNormWeights =
            std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);

    layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
    layer->m_BasicParameters.m_InputToCellWeights->Allocate();
    layer->m_BasicParameters.m_InputToOutputWeights->Allocate();

    layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();

    layer->m_BasicParameters.m_ForgetGateBias->Allocate();
    layer->m_BasicParameters.m_CellBias->Allocate();
    layer->m_BasicParameters.m_OutputGateBias->Allocate();

    layer->m_LayerNormParameters.m_ForgetLayerNormWeights->Allocate();
    layer->m_LayerNormParameters.m_CellLayerNormWeights->Allocate();
    layer->m_LayerNormParameters.m_OutputLayerNormWeights->Allocate();

    // Input and output layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");

    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(0, "outputStateOut");
    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(1, "cellStateOut");
    Layer* const output = graph.AddLayer<OutputLayer>(2, "output");

    // Input/Output tensor info
    armnn::TensorInfo inputInfo({numBatches, inputSize},
                                armnn::DataType::QAsymmS8,
                                inputScale,
                                inputOffset);

    armnn::TensorInfo cellStateInfo({numBatches, numUnits},
                                    armnn::DataType::QSymmS16,
                                    cellStateScale,
                                    cellStateOffset);

    armnn::TensorInfo outputStateInfo({numBatches, outputSize},
                                      armnn::DataType::QAsymmS8,
                                      outputScale,
                                      outputOffset);

    // Connect layers to slots
    Connect(input, layer, inputInfo, 0, 0);
    Connect(outputStateIn, layer, outputStateInfo, 0, 1);
    Connect(cellStateIn, layer, cellStateInfo, 0, 2);

    Connect(layer, outputStateOut, outputStateInfo, 0, 0);
    Connect(layer, cellStateOut, cellStateInfo, 1, 0);
    Connect(layer, output, outputStateInfo, 2, 0);

    CreateTensorHandles(graph, factory);

    // Create and check workload
    auto workload = MakeAndCheckWorkload<QLstmWorkload>(*layer, factory);
    QLstmQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
    CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
    CHECK(queueDescriptor.m_Inputs.size() == 3);
    CHECK(queueDescriptor.m_Outputs.size() == 3);

    CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
    CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
    CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));

    CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
    CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
    CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));

    CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
    CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
    CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));

    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                             armnn::Graph& graph)
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 1;
    layerDesc.m_PadRight = 1;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 1;
    layerDesc.m_StrideY = 1;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;

    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
    layer->m_Bias = std::make_unique<ScopedTensorHandle>
        (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale));
    Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
    CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadRight == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
                                                                   DataType, inputsQScale)));
    CHECK((queueDescriptor.m_Bias->GetTensorInfo()
           == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename DepthwiseConvolution2dFloat32Workload, armnn::DataType DataType>
std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    DepthwiseConvolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 1;
    layerDesc.m_PadRight = 2;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 2;
    layerDesc.m_StrideX = 1;
    layerDesc.m_StrideY = 1;
    layerDesc.m_BiasEnabled = false;
    layerDesc.m_DataLayout = dataLayout;

    DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");

    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({1, 4, 4, 2}, DataType)); // [ 1, H, W, I*M ]
    layer->m_Weight->Allocate();
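    // With 2 input channels and a depth multiplier of 1, I*M == 2 here, matching the
    // trailing dimension of the weight shape above.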

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
                             TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                              TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, factory);

    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
    CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2);
    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false);
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 4, 4, 2}, DataType)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename FullyConnectedWorkload, armnn::DataType DataType>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph)
{
    // Creates the layer we're testing.
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled = false;
    layerDesc.m_TransposeWeightMatrix = true;

    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;

    // As optimization isn't run, member variables need to be updated.
    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
    layer->m_Weight->Allocate();

    armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
    weightsTensorInfo.SetConstant();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    auto const weights = graph.AddLayer<ConstantLayer>("weights");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
    weights->m_LayerOutput->Allocate();

    // Connects up.
    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
    Connect(weights, layer, weightsTensorInfo, 0, 1);
    Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);

    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);

    CHECK(queueDescriptor.m_Inputs.size() == 2);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename FullyConnectedWorkload, armnn::DataType DataType>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
    (armnn::IWorkloadFactory& factory,
     armnn::Graph& graph)
{
    // Creates the layer we're testing.
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_TransposeWeightMatrix = true;

    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;

    // As optimization isn't run, member variables need to be updated.
    layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
    layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
    armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale);
    weightsTensorInfo.SetConstant();
    biasesTensorInfo.SetConstant();

    auto activationDesc = std::make_shared<ActivationDescriptor>();
    activationDesc->m_A = 10.0f;
    activationDesc->m_B = 5.0f;
    activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;

    layer->SetAdditionalInfoForObject(activationDesc);

    // Check that the additional information can be queried from the layer
    std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
    ARMNN_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
                 armnn::ActivationFunction::BoundedReLu);

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    auto const weights = graph.AddLayer<ConstantLayer>("weights");
    auto const biases = graph.AddLayer<ConstantLayer>("biases");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
    weights->m_LayerOutput->Allocate();
    biases->m_LayerOutput = std::make_unique<ScopedTensorHandle>(biasesTensorInfo);
    biases->m_LayerOutput->Allocate();

    // Connects up.
    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
    Connect(weights, layer, weightsTensorInfo, 0, 1);
    Connect(biases, layer, biasesTensorInfo, 0, 2);
    Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);

    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();

    const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
    IgnoreUnused(queueDescBlobPtr);

    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
    ARMNN_ASSERT(
        static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
    );

    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
    CHECK(queueDescriptor.m_Inputs.size() == 3);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename FullyConnectedWorkload, armnn::DataType DataType>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest
    (armnn::IWorkloadFactory& factory,
     armnn::Graph& graph)
{
    // Creates the layer we're testing.
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_TransposeWeightMatrix = true;
    layerDesc.m_ConstantWeights = false;

    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;

    // Creates extra layers with weights and biases as input layers.
    Layer* const input = graph.AddLayer<InputLayer>(1, "input");
    Layer* const weights = graph.AddLayer<InputLayer>(2, "weights");
    Layer* const biases = graph.AddLayer<InputLayer>(3, "biases");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
    Connect(weights, layer, TensorInfo({7, 20}, DataType, inputsQScale), 0, 1);
    Connect(biases, layer, TensorInfo({7}, GetBiasDataType(DataType), inputsQScale), 0, 2);
    Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);

    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();

    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
    CHECK(queueDescriptor.m_Parameters.m_ConstantWeights == false);
    CHECK(queueDescriptor.m_Inputs.size() == 3);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph,
                                                                       DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    NormalizationDescriptor layerDesc;
    layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
    layerDesc.m_NormSize = 3;
    layerDesc.m_Alpha = 0.5f;
    layerDesc.m_Beta = -1.0f;
    layerDesc.m_K = 0.2f;
    layerDesc.m_DataLayout = dataLayout;

    NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
                             TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                              TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 };

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, factory);

    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
    CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
    CHECK(queueDescriptor.m_Parameters.m_NormSize == 3);
    CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
    CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f);
    CHECK(queueDescriptor.m_Parameters.m_K == 0.2f);
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Pooling2dWorkload, armnn::DataType DataType>
std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                               armnn::Graph& graph,
                                                               DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    Pooling2dDescriptor layerDesc;
    layerDesc.m_PoolType = PoolingAlgorithm::Average;
    layerDesc.m_PoolWidth = 3;
    layerDesc.m_PoolHeight = 3;
    layerDesc.m_PadLeft = 2;
    layerDesc.m_PadRight = 2;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 3;
    layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    layerDesc.m_DataLayout = dataLayout;

    Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");

    // Create extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};
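    // With Floor rounding the expected output size is:
    //   outH = (inH + padTop + padBottom - poolH) / strideY + 1 = (5 + 1 + 1 - 3) / 3 + 1 = 2
    //   outW = (inW + padLeft + padRight - poolW) / strideX + 1 = (5 + 2 + 2 - 3) / 2 + 1 = 4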

    // Connect up
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, factory);

    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
    CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
    CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3);
    CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3);
    CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
    CHECK(queueDescriptor.m_Parameters.m_StrideY == 3);
    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2);
    CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Return so we can do extra, backend-specific tests
    return workload;
}

template <typename SoftmaxWorkload, armnn::DataType DataType>
std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // Create the layer we're testing.
    SoftmaxDescriptor softmaxDescriptor;
    // Set Axis to -1 if CL or Neon until further Axes are supported.
    if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
    {
        softmaxDescriptor.m_Axis = -1;
    }

    Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
    // Create extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up
    armnn::TensorInfo tensorInfo({4, 1}, DataType);
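    // Softmax outputs lie in [0, 1], so a scale of 1/256 with a zero point of 0 (QAsymmU8)
    // or -128 (QAsymmS8) should map that range onto the full 8-bit range.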
    if (DataType == armnn::DataType::QAsymmU8)
    {
        tensorInfo.SetQuantizationOffset(0);
        tensorInfo.SetQuantizationScale(1.f / 256);
    }
    else if (DataType == armnn::DataType::QAsymmS8)
    {
        tensorInfo.SetQuantizationOffset(-128);
        tensorInfo.SetQuantizationScale(1.f / 256);
    }

    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, factory);

    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Return so we can do extra, backend-specific tests.
    return workload;
}

template<typename SplitterWorkload, armnn::DataType DataType>
std::unique_ptr<SplitterWorkload>
    CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Create the layer we're testing.
    // NOTE: we need three dimensions (channels, height/y, width/x) because the Compute Library
    //       restricts subtensors to have the same x and y dimensions as their parent tensor,
    //       so the view origins must be zero in x and y; the split is therefore done along
    //       the channel dimension.
    // NOTE: arguments are: number of views, number of dimensions.
    ViewsDescriptor layerDesc(3, 3);
    // NOTE: arguments are: view, dimension, value.
    layerDesc.SetViewOriginCoord(0, 0, 0);
    layerDesc.SetViewOriginCoord(1, 0, 1);
    layerDesc.SetViewOriginCoord(2, 0, 3);
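    // With the {5, 7, 7} input below this should give three channel-wise views: one channel
    // starting at offset 0, two starting at offset 1, and two starting at offset 3.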

    Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");

    // Adds extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");

    // Connects up.
    armnn::TensorInfo tensorInfo({5, 7, 7}, DataType);
    Connect(input, layer, tensorInfo);

    armnn::TensorInfo output0Info({1, 7, 7}, DataType);
    armnn::TensorInfo output1Info({2, 7, 7}, DataType);
    armnn::TensorInfo output2Info({2, 7, 7}, DataType);

    Connect(layer, output0, output0Info, 0, 0);
    Connect(layer, output1, output1Info, 1, 0);
    Connect(layer, output2, output2Info, 2, 0);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, factory);

    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 3);
    CHECK(queueDescriptor.m_ViewOrigins.size() == 3);

    CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
    CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
    CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
    CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
    CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
    CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
    CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
    CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
    CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads.
template<typename SplitterWorkload, typename ConcatWorkload, armnn::DataType DataType>
std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
    CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);

    armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType);

    // Constructs the graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);
    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);

    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 1);
    splitterViews.SetViewOriginCoord(1, 2, 0);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    // create splitter layer
    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
    CHECK(splitter);

    armnn::OriginsDescriptor concatViews(2);
    concatViews.SetViewOriginCoord(0, 0, 0);
    concatViews.SetViewOriginCoord(0, 1, 1);
    concatViews.SetViewOriginCoord(0, 2, 0);
    concatViews.SetViewOriginCoord(0, 3, 0);

    concatViews.SetViewOriginCoord(1, 0, 0);
    concatViews.SetViewOriginCoord(1, 1, 0);
    concatViews.SetViewOriginCoord(1, 2, 0);
    concatViews.SetViewOriginCoord(1, 3, 0);

    // create concat layer
    Layer* const concat = graph.AddLayer<ConcatLayer>(concatViews, "concat");
    CHECK(concat);

    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Adds connections.
    // connect input to splitter
    Connect(input, splitter, inputTensorInfo, 0, 0);
    // connect splitter[0] to concat[1]
    Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up.
    // connect splitter[1] to concat[0]
    Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
    // connect concat to output
    Connect(concat, output, inputTensorInfo, 0, 0);

    // create tensor handles
    CreateTensorHandles(graph, factory);

    // create splitter workload
    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
    CHECK(workloadSplitter);
    // create concat workload
    auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
    CHECK(workloadConcat);

    return {std::move(workloadSplitter), std::move(workloadConcat)};
}

/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then
/// connected to two different activation layers.
template<typename SplitterWorkload, typename ActivationWorkload, armnn::DataType DataType>
void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
                                                       std::unique_ptr<SplitterWorkload>& wlSplitter,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv0_0,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv0_1,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv1_0,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv1_1)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 100, 50 }, DataType);
    armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType);

    // Constructs the graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);

    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);

    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 1);
    splitterViews.SetViewOriginCoord(1, 2, 0);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");

    armnn::ActivationDescriptor activationDesc;

    Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0");
    Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1");
    Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0");
    Layer* const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1");

    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
    Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
    Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");

    // Adds connections.
    Connect(input, splitter, inputTensorInfo, 0, 0);
    Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
    Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);

    Connect(splitter, activ1_0, splitTensorInfo2, 1, 0);
    Connect(splitter, activ1_1, splitTensorInfo2, 1, 0);

    Connect(activ0_0, output1, splitTensorInfo1, 0, 0);
    Connect(activ0_1, output2, splitTensorInfo1, 0, 0);
    Connect(activ1_0, output3, splitTensorInfo2, 0, 0);
    Connect(activ1_1, output4, splitTensorInfo2, 0, 0);

    CreateTensorHandles(graph, factory);

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
    auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, factory);
    auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, factory);
    auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, factory);
    auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, factory);

    wlSplitter = std::move(workloadSplitter);
    wlActiv0_0 = std::move(workloadActiv0_0);
    wlActiv0_1 = std::move(workloadActiv0_1);
    wlActiv1_0 = std::move(workloadActiv1_0);
    wlActiv1_1 = std::move(workloadActiv1_1);
}

template <typename ResizeWorkload, armnn::DataType DataType>
std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph,
                                                                 DataLayout dataLayout = DataLayout::NCHW)
{
    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
    }

    // Creates the layer we're testing.
    ResizeDescriptor resizeDesc;
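    // DataLayoutIndexed exposes the height/width axis indices for the layout
    // (NCHW: H = 2, W = 3; NHWC: H = 1, W = 2), so the target size can be read
    // from the output shape in either layout.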
    armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    resizeDesc.m_Method = ResizeMethod::Bilinear;
    resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
    resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
    resizeDesc.m_DataLayout = dataLayout;
    Layer* const layer = graph.AddLayer<ResizeLayer>(resizeDesc, "resize");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, factory);

    auto queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);
    CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename BatchToSpaceNdWorkload, armnn::DataType DataType>
std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph)
{
    BatchToSpaceNdDescriptor desc;
    Layer* const layer = graph.AddLayer<BatchToSpaceNdLayer>(desc, "batchToSpace");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({1, 1, 1, 1}, DataType);

    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, factory);

    BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    return workload;
}

template <typename LogSoftmaxWorkload, armnn::DataType DataType>
std::unique_ptr<LogSoftmaxWorkload> CreateLogSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph)
{
    // Create the layer we're testing.
    LogSoftmaxDescriptor logSoftmaxDescriptor;
    // Set Axis to -1 if CL or Neon until further Axes are supported.
    if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
    {
        logSoftmaxDescriptor.m_Axis = -1;
    }

    Layer* const layer = graph.AddLayer<LogSoftmaxLayer>(logSoftmaxDescriptor, "layer");
    // Create extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up
    armnn::TensorInfo tensorInfo({4, 1}, DataType);

    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<LogSoftmaxWorkload>(*layer, factory);

    LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Return so we can do extra, backend-specific tests.
    return workload;
}

template <typename L2NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                           armnn::Graph& graph,
                                                                           DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    L2NormalizationDescriptor layerDesc;
    layerDesc.m_DataLayout = dataLayout;

    Layer* const layer = graph.AddLayer<L2NormalizationLayer>(layerDesc, "l2norm");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
                             TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                              TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, factory);

    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ReshapeWorkload, armnn::DataType DataType>
std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // Creates the layer we're testing.
    TensorShape outputShape({ 1, 4 });
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputShape;
    Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, factory);

    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ConvertFp16ToFp32Float32Workload>
std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32WorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ConvertFp16ToFp32Layer* const layer = graph.AddLayer<ConvertFp16ToFp32Layer>("Fp16ToFp32Converter");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, factory);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ConvertFp32ToFp16Float16Workload>
std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16WorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ConvertFp32ToFp16Layer* const layer = graph.AddLayer<ConvertFp32ToFp16Layer>("Fp32ToFp16Converter");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, factory);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename MeanWorkload, armnn::DataType DataType>
std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Reduce along dimensions 1 and 2, and do not keep the reduced dimensions.
    MeanDescriptor descriptor({ 1, 2 }, false);
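    // e.g. the { 1, 3, 7, 4 } input below is reduced over axes 1 and 2 to give { 1, 4 }.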

    // Creates the layer we're testing.
    Layer* const layer = graph.AddLayer<MeanLayer>(descriptor, "mean");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 1, 3, 7, 4 }, DataType);
    armnn::TensorInfo outputTensorInfo({ 1, 4 }, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, factory);

    MeanQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
    CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template<typename ConcatWorkload, armnn::DataType DataType>
std::unique_ptr<ConcatWorkload> CreateConcatWorkloadTest(armnn::IWorkloadFactory& factory,
                                                         armnn::Graph& graph,
                                                         const armnn::TensorShape& outputShape,
                                                         unsigned int concatAxis)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);

    // Constructs the graph.
    Layer* const input0 = graph.AddLayer<InputLayer>(0, "input0");
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    armnn::OriginsDescriptor descriptor;

    std::vector<armnn::TensorShape> inputShapes{{ 2, 3, 2, 5 }, { 2, 3, 2, 5 }};

    descriptor = CreateDescriptorForConcatenation(inputShapes.begin(),
                                                  inputShapes.end(),
                                                  concatAxis);
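    // CreateDescriptorForConcatenation offsets each view's origin along concatAxis by the extents
    // of the preceding inputs; e.g. for concatAxis == 1 the second { 2, 3, 2, 5 } view should
    // start at { 0, 3, 0, 0 }.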

    // create concat layer
    Layer* const concat = graph.AddLayer<ConcatLayer>(descriptor, "concat");
    CHECK(concat);

    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Adds connections.
    // connect input0 to concat
    Connect(input0, concat, inputTensorInfo, 0, 0);
    // connect input1 to concat
    Connect(input1, concat, inputTensorInfo, 0, 1);
    // connect concat to output
    Connect(concat, output, outputTensorInfo, 0, 0);

    // create tensor handles
    CreateTensorHandles(graph, factory);

    // create concat workload
    auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
    CHECK(workloadConcat);

    return workloadConcat;
}

template <typename PreCompiledWorkload, armnn::DataType dataType>
std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> CreatePreCompiledWorkloadTest(
    armnn::IWorkloadFactory& factory,
    armnn::Graph& graph,
    bool biasEnabled = false)
{
    IgnoreUnused(graph);

    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    // Add an input layer
    armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
    CHECK(inputLayer);

    // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
    // ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC
    // this test is using NHWC, so the weights shape is OHWI
    TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0, true);
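    // Here that is O = 16 output channels, H = 1, W = 1, I = 16 input channels, i.e. a 1x1 convolution.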
    unsigned int weightsLength = weightsTensorInfo.GetNumElements();

    using WeightType = armnn::ResolveType<dataType>;
    std::vector<WeightType> convWeightsData(weightsLength);
    for (unsigned int i = 0; i < weightsLength; ++i)
    {
        convWeightsData[i] = static_cast<WeightType>(i);
    }

    armnn::ConstTensor weights(weightsTensorInfo, convWeightsData);

    // Add a layer that can be used in the PreCompiled layer
    armnn::Convolution2dDescriptor convDesc2d;
    convDesc2d.m_StrideX = 1;
    convDesc2d.m_StrideY = 1;
    convDesc2d.m_BiasEnabled = biasEnabled;
    convDesc2d.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::IConnectableLayer* convLayer = nullptr;
    const std::string convLayerName("conv layer");

    if (biasEnabled)
    {
        constexpr armnn::DataType biasDataType = (dataType == armnn::DataType::QAsymmU8) ?
            armnn::DataType::Signed32 : armnn::DataType::Float32;

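        // For quantized types the bias scale is expected to be inputScale * weightsScale
        // (0.9f * 0.9f here), matching the usual Arm NN quantization scheme for biases.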
        TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0, true);
        unsigned int biasLength = biasTensorInfo.GetNumElements();

        using BiasType = armnn::ResolveType<biasDataType>;
        std::vector<BiasType> biasData(biasLength);
        std::fill(biasData.begin(), biasData.end(), static_cast<BiasType>(0));

        armnn::ConstTensor biases(biasTensorInfo, biasData);

        // Create convolution layer with biases
        convLayer = net->AddConvolution2dLayer(convDesc2d,
                                               weights,
                                               Optional<ConstTensor>(biases),
                                               convLayerName.c_str());
    }
    else
    {
        // Create convolution layer without biases
        convLayer = net->AddConvolution2dLayer(convDesc2d,
                                               weights,
                                               EmptyOptional(),
                                               convLayerName.c_str());
    }

    CHECK(convLayer);

    // Add an output layer
    armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
    CHECK(outputLayer);

    // set the tensors in the network (NHWC format)
    TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
    if (dataType == armnn::DataType::QAsymmU8)
    {
        inputTensorInfo.SetQuantizationOffset(0);
        inputTensorInfo.SetQuantizationScale(0.9f);
    }

    TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
    if (dataType == armnn::DataType::QAsymmU8)
    {
        outputTensorInfo.SetQuantizationOffset(0);
        outputTensorInfo.SetQuantizationScale(0.9f);
    }

    // Connect the layers
    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Optimize the network for the backend supported by the factory
    std::vector<armnn::BackendId> backends = {factory.GetBackendId()};
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
    armnn::OptimizerOptions optimizerOptions;
    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                               optimizerOptions);
    CHECK(optimizedNet != nullptr);
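    // A backend that supports pre-compilation is expected to substitute the supported subgraph
    // (here the convolution) with a single PreCompiled layer during optimization.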

    // Find the PreCompiled layer in the optimised graph
    armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
    Layer* preCompiledLayer = nullptr;
    for (auto& layer : optimisedGraph)
    {
        if (layer->GetType() == LayerType::PreCompiled)
        {
            preCompiledLayer = layer;
        }
    }
    CHECK(preCompiledLayer != nullptr);

    // Create the TensorHandles.
    CreateTensorHandles(optimisedGraph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, factory);

    PreCompiledQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns the workload so we can do extra, backend-specific tests.
    // NOTE: we need to return the optimised network as well, otherwise it goes
    //       out of scope and the tensor handles are destroyed.
    return std::make_pair(std::move(optimizedNet), std::move(workload));
}

template<typename ConstantWorkload, armnn::DataType DataType>
std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFactory& factory,
                                                             armnn::Graph& graph,
                                                             const armnn::TensorShape& outputShape)
{
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);

    // create constant layer
    auto constant = graph.AddLayer<ConstantLayer>("constant");
    CHECK(constant);
    constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);

    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Adds connections.
    // connect constant to output
    Connect(constant, output, outputTensorInfo, 0, 0);

    // create tensor handles
    CreateTensorHandles(graph, factory);

    // create constant workload
    auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, factory);
    CHECK(workloadConstant);

    return workloadConstant;
}

template <typename PreluWorkload>
std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory,
                                                       armnn::Graph& graph,
                                                       const armnn::TensorShape& inputShape,
                                                       const armnn::TensorShape& alphaShape,
                                                       const armnn::TensorShape& outputShape,
                                                       armnn::DataType dataType)
{
    // Creates the PReLU layer
    Layer* const layer = graph.AddLayer<PreluLayer>("prelu");
    CHECK(layer != nullptr);

    // Creates extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const alpha = graph.AddLayer<InputLayer>(1, "alpha");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
    CHECK(input != nullptr);
    CHECK(alpha != nullptr);
    CHECK(output != nullptr);

    // Connects up
    armnn::TensorInfo inputTensorInfo(inputShape, dataType);
    armnn::TensorInfo alphaTensorInfo(alphaShape, dataType);
    armnn::TensorInfo outputTensorInfo(outputShape, dataType);
    Connect(input, layer, inputTensorInfo, 0, 0);
    Connect(alpha, layer, alphaTensorInfo, 0, 1);
    Connect(layer, output, outputTensorInfo, 0, 0);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it
    auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, factory);

    PreluQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 2);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename SpaceToDepthWorkload, armnn::DataType DataType>
std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                     armnn::Graph& graph)
{
    SpaceToDepthDescriptor desc;
    desc.m_BlockSize = 2;
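    // SpaceToDepth moves each blockSize x blockSize spatial block into the channel dimension;
    // assuming the descriptor's default NHWC layout, the { 1, 2, 2, 1 } input below should
    // become { 1, 1, 1, 4 }.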
    Layer* const layer = graph.AddLayer<SpaceToDepthLayer>(desc, "spaceToDepth");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 2, 1 }, DataType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 4 }, DataType);

    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, factory);

    SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == 1);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    return workload;
}

template <typename StackWorkload, armnn::DataType DataType>
std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory& factory,
                                                       armnn::Graph& graph,
                                                       const armnn::TensorShape& inputShape,
                                                       const armnn::TensorShape& outputShape,
                                                       unsigned int axis,
                                                       unsigned int numInputs)
{
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);

    // Constructs the Stack layer.
    armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
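    // Stack joins numInputs tensors of shape inputShape along a new dimension, so the output
    // rank is the input rank + 1 with size numInputs at 'axis'.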
    Layer* const stackLayer = graph.AddLayer<StackLayer>(descriptor, "stack");
    CHECK(stackLayer != nullptr);

    // Constructs layer inputs and output.
    std::vector<Layer*> inputs;
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        inputs.push_back(graph.AddLayer<InputLayer>(
            static_cast<int>(i),
            ("input" + std::to_string(i)).c_str()
        ));
        CHECK(inputs[i] != nullptr);
    }
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
    CHECK(output != nullptr);

    // Adds connections.
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        Connect(inputs[i], stackLayer, inputTensorInfo, 0, i);
    }
    Connect(stackLayer, output, outputTensorInfo, 0, 0);

    CreateTensorHandles(graph, factory);

    auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, factory);
    StackQueueDescriptor queueDescriptor = stackWorkload->GetData();
    CHECK(queueDescriptor.m_Inputs.size() == numInputs);
    CHECK(queueDescriptor.m_Outputs.size() == 1);

    return stackWorkload;
}

} // Anonymous namespace