blob: 9d6312ea538d34235877bf3273fa42a8829e3ab9 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5#pragma once
6
7#include "QuantizeHelper.hpp"
8
9#include <armnn/ArmNN.hpp>
10
11#include <Permute.hpp>
12#include <ResolveType.hpp>
13
14#include <backendsCommon/test/CommonTestUtils.hpp>
15
16#include <boost/test/unit_test.hpp>
17
18#include <map>
19#include <vector>
20
21namespace
22{
23
24INetworkPtr CreateTransposeConvolution2dNetwork(const armnn::TransposeConvolution2dDescriptor& descriptor,
25 const armnn::TensorInfo& inputInfo,
26 const armnn::TensorInfo& outputInfo,
27 const armnn::ConstTensor& weights,
28 const armnn::Optional<armnn::ConstTensor>& biases)
29{
30 using namespace armnn;
31
32 INetworkPtr network(INetwork::Create());
33 IConnectableLayer* input = network->AddInputLayer(0, "input");
34 IConnectableLayer* transposeConvolution2d =
35 network->AddTransposeConvolution2dLayer(descriptor, weights, biases, "transposeConvolution2d");
36 IConnectableLayer* output = network->AddOutputLayer(0, "output");
37
38 Connect(input, transposeConvolution2d, inputInfo, 0, 0);
39 Connect(transposeConvolution2d, output, outputInfo, 0, 0);
40
41 return network;
42}
43
44} // anonymous namespace
45
46template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
47void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
48 armnn::DataLayout dataLayout)
49{
50 using namespace armnn;
51 using T = ResolveType<ArmnnType>;
52
53 constexpr unsigned int batches = 1u;
54 constexpr unsigned int channels = 1u;
55
56 constexpr unsigned int wInput = 3u;
57 constexpr unsigned int hInput = wInput;
58
59 constexpr unsigned int wOutput = 5u;
60 constexpr unsigned int hOutput = wOutput;
61
62 constexpr unsigned int wWeights = 3u;
63 constexpr unsigned int hWeights = wWeights;
64
65 TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, dataLayout);
66 TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, dataLayout);
67 TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, dataLayout);
68
69 const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
70 const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
71
72 TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
73 TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
74 TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset);
75 TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0);
76
77 std::vector<float> inputData =
78 {
79 1.f, 1.f, 1.f,
80 1.f, 1.f, 1.f,
81 1.f, 1.f, 1.f
82 };
83
84 std::vector<float> weightsData =
85 {
86 1.f, 2.f, 3.f,
87 4.f, 5.f, 6.f,
88 7.f, 8.f, 9.f
89 };
90
91 std::vector<float> biasesData = { 1.f };
92
93 std::vector<float> expectedOutputData =
94 {
95 6.f, 11.f, 6.f, 11.f, 6.f,
96 11.f, 21.f, 11.f, 21.f, 11.f,
97 6.f, 11.f, 6.f, 11.f, 6.f,
98 11.f, 21.f, 11.f, 21.f, 11.f,
99 6.f, 11.f, 6.f, 11.f, 6.f
100 };
101
102 TransposeConvolution2dDescriptor descriptor;
103 descriptor.m_PadLeft = 1;
104 descriptor.m_PadRight = 1;
105 descriptor.m_PadTop = 1;
106 descriptor.m_PadBottom = 1;
107 descriptor.m_StrideX = 2;
108 descriptor.m_StrideY = 2;
109 descriptor.m_BiasEnabled = true;
110 descriptor.m_DataLayout = dataLayout;
111
112 // swizzle data if needed
113 if (dataLayout == armnn::DataLayout::NHWC)
114 {
115 constexpr size_t dataTypeSize = sizeof(float);
116 const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
117
118 std::vector<float> tmp(inputData.size());
119 armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
120 inputData = tmp;
121
122 tmp.resize(weightsData.size());
123 armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
124 weightsData = tmp;
125
126 tmp.resize(expectedOutputData.size());
127 armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
128 expectedOutputData = tmp;
129 }
130
131 // quantize data
132 std::vector<T> qInputData = QuantizedVector<T>(qScale, qOffset, inputData);
133 std::vector<T> qWeightsData = QuantizedVector<T>(qScale, qOffset, weightsData);
134 std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
135
136 using BT = ResolveType<ArmnnBType>;
137 std::vector<BT> qBiasesData = QuantizedVector<BT>(qScale * qScale, 0, biasesData);
138
139 ConstTensor weights(weightsInfo, qWeightsData);
140 ConstTensor biases(biasesInfo, qBiasesData);
141
142 INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
143 inputInfo,
144 outputInfo,
145 weights,
146 Optional<ConstTensor>(biases));
147
148
149 EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
150 { { 0, qInputData } },
151 { { 0, qExpectedOutputData } },
152 backends);
153}