blob: 817cdeed7938981c93d026d17f6e4323947674cc [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5
Matteo Martincighbf0e7222019-06-20 17:17:45 +01006#include "CommonTestUtils.hpp"
7
David Beckac42efd2018-09-26 17:41:13 +01008#include <Graph.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +01009
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <backendsCommon/CpuTensorHandle.hpp>
11#include <backendsCommon/WorkloadData.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000013#include <boost/test/unit_test.hpp>
14
15#include <utility>
telsoa01c577f2c2018-08-31 09:22:23 +010016
17using namespace armnn;
18using namespace std;
19
/////////////////////////////////////////////////////////////////////////////////////////////
// The following tests are created specifically to test the ReleaseConstantData() method in the Layer.
// They build very simple graphs that include the layer to be checked.
// Weights and biases are checked both before and after the method is called.
/////////////////////////////////////////////////////////////////////////////////////////////
25
26BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
27
28BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
29{
Aron Virginas-Tar56055192018-11-12 18:10:43 +000030 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +010031
32 // create the layer we're testing
33 BatchNormalizationDescriptor layerDesc;
34 layerDesc.m_Eps = 0.05f;
35 BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
36
37 armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
38 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
39 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
40 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
41 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
42 layer->m_Mean->Allocate();
43 layer->m_Variance->Allocate();
44 layer->m_Beta->Allocate();
45 layer->m_Gamma->Allocate();
46
47 // create extra layers
48 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
49 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
50
51 // connect up
52 armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
53 Connect(input, layer, tensorInfo);
54 Connect(layer, output, tensorInfo);
55
56 // check the constants that they are not NULL
57 BOOST_CHECK(layer->m_Mean != nullptr);
58 BOOST_CHECK(layer->m_Variance != nullptr);
59 BOOST_CHECK(layer->m_Beta != nullptr);
60 BOOST_CHECK(layer->m_Gamma != nullptr);
61
62 // free up the constants..
63 layer->ReleaseConstantData();
64
65 // check the constants that they are NULL now
66 BOOST_CHECK(layer->m_Mean == nullptr);
67 BOOST_CHECK(layer->m_Variance == nullptr);
68 BOOST_CHECK(layer->m_Beta == nullptr);
69 BOOST_CHECK(layer->m_Gamma == nullptr);
70
71 }
72
73
74 BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
75 {
Aron Virginas-Tar56055192018-11-12 18:10:43 +000076 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +010077
78 // create the layer we're testing
79 Convolution2dDescriptor layerDesc;
80 layerDesc.m_PadLeft = 3;
81 layerDesc.m_PadRight = 3;
82 layerDesc.m_PadTop = 1;
83 layerDesc.m_PadBottom = 1;
84 layerDesc.m_StrideX = 2;
85 layerDesc.m_StrideY = 4;
86 layerDesc.m_BiasEnabled = true;
87
88 Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
89
90 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
91 armnn::DataType::Float32));
92 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
93 (TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
94
95 layer->m_Weight->Allocate();
96 layer->m_Bias->Allocate();
97
98 // create extra layers
99 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
100 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
101
102 // connect up
103 Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
104 Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));
105
106 // check the constants that they are not NULL
107 BOOST_CHECK(layer->m_Weight != nullptr);
108 BOOST_CHECK(layer->m_Bias != nullptr);
109
110 // free up the constants..
111 layer->ReleaseConstantData();
112
113 // check the constants that they are NULL now
114 BOOST_CHECK(layer->m_Weight == nullptr);
115 BOOST_CHECK(layer->m_Bias == nullptr);
116}
117
118BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
119{
Aron Virginas-Tar56055192018-11-12 18:10:43 +0000120 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +0100121
122 // create the layer we're testing
123 DepthwiseConvolution2dDescriptor layerDesc;
124 layerDesc.m_PadLeft = 3;
125 layerDesc.m_PadRight = 3;
126 layerDesc.m_PadTop = 1;
127 layerDesc.m_PadBottom = 1;
128 layerDesc.m_StrideX = 2;
129 layerDesc.m_StrideY = 4;
130 layerDesc.m_BiasEnabled = true;
131
132 DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
133
134 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
135 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
136 layer->m_Weight->Allocate();
137 layer->m_Bias->Allocate();
138
139 // create extra layers
140 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
141 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
142
143 // connect up
144 Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
145 Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
146
147 // check the constants that they are not NULL
148 BOOST_CHECK(layer->m_Weight != nullptr);
149 BOOST_CHECK(layer->m_Bias != nullptr);
150
151 // free up the constants..
152 layer->ReleaseConstantData();
153
154 // check the constants that they are NULL now
155 BOOST_CHECK(layer->m_Weight == nullptr);
156 BOOST_CHECK(layer->m_Bias == nullptr);
157}
158
159BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
160{
Aron Virginas-Tar56055192018-11-12 18:10:43 +0000161 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +0100162
163 // create the layer we're testing
164 FullyConnectedDescriptor layerDesc;
165 layerDesc.m_BiasEnabled = true;
166 layerDesc.m_TransposeWeightMatrix = true;
167
168 FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
169
170 float inputsQScale = 1.0f;
171 float outputQScale = 2.0f;
172
173 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
Derek Lambertif90c56d2020-01-10 17:14:08 +0000174 DataType::QAsymmU8, inputsQScale, 0));
telsoa01c577f2c2018-08-31 09:22:23 +0100175 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
Derek Lambertif90c56d2020-01-10 17:14:08 +0000176 GetBiasDataType(DataType::QAsymmU8), inputsQScale));
telsoa01c577f2c2018-08-31 09:22:23 +0100177 layer->m_Weight->Allocate();
178 layer->m_Bias->Allocate();
179
180 // create extra layers
181 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
182 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
183
184 // connect up
Derek Lambertif90c56d2020-01-10 17:14:08 +0000185 Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
186 Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
telsoa01c577f2c2018-08-31 09:22:23 +0100187
188 // check the constants that they are not NULL
189 BOOST_CHECK(layer->m_Weight != nullptr);
190 BOOST_CHECK(layer->m_Bias != nullptr);
191
192 // free up the constants..
193 layer->ReleaseConstantData();
194
195 // check the constants that they are NULL now
196 BOOST_CHECK(layer->m_Weight == nullptr);
197 BOOST_CHECK(layer->m_Bias == nullptr);
198}
199
200BOOST_AUTO_TEST_SUITE_END()
201