//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ResolveType.hpp>

#include <armnn/INetwork.hpp>

#include <armnnUtils/QuantizeHelper.hpp>

#include <armnnTestUtils/DataLayoutUtils.hpp>

namespace
{

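// Builds the graph under test: a single DepthToSpace layer connected between
// one input layer and one output layer.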
armnn::INetworkPtr CreateDepthToSpaceNetwork(const armnn::TensorInfo& inputInfo,
                                             const armnn::TensorInfo& outputInfo,
                                             const armnn::DepthToSpaceDescriptor& descriptor)
{
    using namespace armnn;

    INetworkPtr network(INetwork::Create());

    IConnectableLayer* input = network->AddInputLayer(0, "input");
    IConnectableLayer* depthToSpace = network->AddDepthToSpaceLayer(descriptor, "depthToSpace");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    Connect(input, depthToSpace, inputInfo, 0, 0);
    Connect(depthToSpace, output, outputInfo, 0, 0);

    return network;
}

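// Quantizes the reference data when a quantized type is under test, permutes
// it to NCHW if the descriptor requests that layout, and then runs the graph
// end-to-end on the given backends, comparing the actual output against the
// expected output.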
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends,
                              const DepthToSpaceDescriptor& descriptor,
                              const armnn::TensorShape& nhwcInputShape,
                              const armnn::TensorShape& nhwcOutputShape,
                              const std::vector<float>& floatInputData,
                              const std::vector<float>& floatExpectedOutputData)
{
    using namespace armnn;

    TensorInfo inputInfo(nhwcInputShape, ArmnnType);
    inputInfo.SetConstant(true);
    TensorInfo outputInfo(nhwcOutputShape, ArmnnType);

    constexpr float qScale = 0.25f;
    constexpr int32_t qOffset = 128;

    // Set quantization parameters for quantized types
    if (IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
    }

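    // QuantizedVector applies qScale/qOffset only for quantized types; for
    // floating-point types it returns the values unchanged.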
    std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData, qScale, qOffset);
    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);

    // Permute tensors from NHWC to NCHW (if needed)
    if (descriptor.m_DataLayout == DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputInfo, inputData);
        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
    }

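    // Build the network and run inference on every requested backend, checking
    // the output tensor against expectedOutputData.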
    INetworkPtr network = CreateDepthToSpaceNetwork(inputInfo, outputInfo, descriptor);
    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
                                                { { 0, inputData } },
                                                { { 0, expectedOutputData } },
                                                backends);
}

} // anonymous namespace

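// End-to-end DepthToSpace test case: block size 2 applied to a 2x2x2x4 (NHWC)
// input produces a 2x4x4x1 output; the impl above permutes the data to NCHW
// when that layout is requested.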
template<armnn::DataType ArmnnType>
void DepthToSpaceEndToEnd(const std::vector<armnn::BackendId>& defaultBackends,
                          armnn::DataLayout dataLayout)
{
    using namespace armnn;

    TensorShape inputShape = { 2, 2, 2, 4 };
    TensorShape outputShape = { 2, 4, 4, 1 };

    std::vector<float> inputData =
    {
         1.f,  2.f,  3.f,  4.f,
         5.f,  6.f,  7.f,  8.f,
         9.f, 10.f, 11.f, 12.f,
        13.f, 14.f, 15.f, 16.f,

        17.f, 18.f, 19.f, 20.f,
        21.f, 22.f, 23.f, 24.f,
        25.f, 26.f, 27.f, 28.f,
        29.f, 30.f, 31.f, 32.f
    };

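    // Expected DepthToSpace result: the four channel values of each input
    // pixel (e.g. 1, 2, 3, 4) are rearranged into a 2x2 spatial block of the
    // single-channel output.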
    std::vector<float> expectedOutputData =
    {
         1.f,  2.f,  5.f,  6.f,
         3.f,  4.f,  7.f,  8.f,
         9.f, 10.f, 13.f, 14.f,
        11.f, 12.f, 15.f, 16.f,

        17.f, 18.f, 21.f, 22.f,
        19.f, 20.f, 23.f, 24.f,
        25.f, 26.f, 29.f, 30.f,
        27.f, 28.f, 31.f, 32.f
    };

    DepthToSpaceEndToEndImpl<ArmnnType>(defaultBackends,
                                        DepthToSpaceDescriptor(2, dataLayout),
                                        inputShape,
                                        outputShape,
                                        inputData,
                                        expectedOutputData);
}
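
// Illustrative call site (not part of this header): a backend's end-to-end
// test suite could invoke the helper from a doctest case roughly as sketched
// below. The test name and the CpuRef backend choice are assumptions for the
// example only.
//
//     TEST_CASE("DepthToSpaceEndToEndNhwcFloat32")
//     {
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//         DepthToSpaceEndToEnd<armnn::DataType::Float32>(backends, armnn::DataLayout::NHWC);
//     }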