blob: d09ebb612a97b80eb74a0cc8cced6ab5ac1391a5 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
David Beckac42efd2018-09-26 17:41:13 +01006#include <Graph.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +01007
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00008#include <backendsCommon/CpuTensorHandle.hpp>
9#include <backendsCommon/WorkloadData.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010010
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000011#include <boost/cast.hpp>
12#include <boost/test/unit_test.hpp>
13
14#include <utility>
telsoa01c577f2c2018-08-31 09:22:23 +010015
16using namespace armnn;
17using namespace std;
18
19// connects two layers
20void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
21{
22 from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
23 from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
24}
25
/////////////////////////////////////////////////////////////////////////////////////////////
// The following tests were created specifically to exercise the ReleaseConstantData() method
// in Layer. Each builds a very simple graph containing the layer to be checked, and verifies
// the weights and biases both before and after the method is called.
/////////////////////////////////////////////////////////////////////////////////////////////
31
32BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
33
34BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
35{
Aron Virginas-Tar56055192018-11-12 18:10:43 +000036 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +010037
38 // create the layer we're testing
39 BatchNormalizationDescriptor layerDesc;
40 layerDesc.m_Eps = 0.05f;
41 BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
42
43 armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
44 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
45 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
46 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
47 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
48 layer->m_Mean->Allocate();
49 layer->m_Variance->Allocate();
50 layer->m_Beta->Allocate();
51 layer->m_Gamma->Allocate();
52
53 // create extra layers
54 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
55 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
56
57 // connect up
58 armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
59 Connect(input, layer, tensorInfo);
60 Connect(layer, output, tensorInfo);
61
62 // check the constants that they are not NULL
63 BOOST_CHECK(layer->m_Mean != nullptr);
64 BOOST_CHECK(layer->m_Variance != nullptr);
65 BOOST_CHECK(layer->m_Beta != nullptr);
66 BOOST_CHECK(layer->m_Gamma != nullptr);
67
68 // free up the constants..
69 layer->ReleaseConstantData();
70
71 // check the constants that they are NULL now
72 BOOST_CHECK(layer->m_Mean == nullptr);
73 BOOST_CHECK(layer->m_Variance == nullptr);
74 BOOST_CHECK(layer->m_Beta == nullptr);
75 BOOST_CHECK(layer->m_Gamma == nullptr);
76
77 }
78
79
80 BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
81 {
Aron Virginas-Tar56055192018-11-12 18:10:43 +000082 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +010083
84 // create the layer we're testing
85 Convolution2dDescriptor layerDesc;
86 layerDesc.m_PadLeft = 3;
87 layerDesc.m_PadRight = 3;
88 layerDesc.m_PadTop = 1;
89 layerDesc.m_PadBottom = 1;
90 layerDesc.m_StrideX = 2;
91 layerDesc.m_StrideY = 4;
92 layerDesc.m_BiasEnabled = true;
93
94 Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
95
96 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
97 armnn::DataType::Float32));
98 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
99 (TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
100
101 layer->m_Weight->Allocate();
102 layer->m_Bias->Allocate();
103
104 // create extra layers
105 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
106 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
107
108 // connect up
109 Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
110 Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));
111
112 // check the constants that they are not NULL
113 BOOST_CHECK(layer->m_Weight != nullptr);
114 BOOST_CHECK(layer->m_Bias != nullptr);
115
116 // free up the constants..
117 layer->ReleaseConstantData();
118
119 // check the constants that they are NULL now
120 BOOST_CHECK(layer->m_Weight == nullptr);
121 BOOST_CHECK(layer->m_Bias == nullptr);
122}
123
124BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
125{
Aron Virginas-Tar56055192018-11-12 18:10:43 +0000126 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +0100127
128 // create the layer we're testing
129 DepthwiseConvolution2dDescriptor layerDesc;
130 layerDesc.m_PadLeft = 3;
131 layerDesc.m_PadRight = 3;
132 layerDesc.m_PadTop = 1;
133 layerDesc.m_PadBottom = 1;
134 layerDesc.m_StrideX = 2;
135 layerDesc.m_StrideY = 4;
136 layerDesc.m_BiasEnabled = true;
137
138 DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
139
140 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
141 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
142 layer->m_Weight->Allocate();
143 layer->m_Bias->Allocate();
144
145 // create extra layers
146 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
147 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
148
149 // connect up
150 Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
151 Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
152
153 // check the constants that they are not NULL
154 BOOST_CHECK(layer->m_Weight != nullptr);
155 BOOST_CHECK(layer->m_Bias != nullptr);
156
157 // free up the constants..
158 layer->ReleaseConstantData();
159
160 // check the constants that they are NULL now
161 BOOST_CHECK(layer->m_Weight == nullptr);
162 BOOST_CHECK(layer->m_Bias == nullptr);
163}
164
165BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
166{
Aron Virginas-Tar56055192018-11-12 18:10:43 +0000167 Graph graph;
telsoa01c577f2c2018-08-31 09:22:23 +0100168
169 // create the layer we're testing
170 FullyConnectedDescriptor layerDesc;
171 layerDesc.m_BiasEnabled = true;
172 layerDesc.m_TransposeWeightMatrix = true;
173
174 FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
175
176 float inputsQScale = 1.0f;
177 float outputQScale = 2.0f;
178
179 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
180 DataType::QuantisedAsymm8, inputsQScale, 0));
181 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
182 GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
183 layer->m_Weight->Allocate();
184 layer->m_Bias->Allocate();
185
186 // create extra layers
187 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
188 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
189
190 // connect up
191 Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
192 Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
193
194 // check the constants that they are not NULL
195 BOOST_CHECK(layer->m_Weight != nullptr);
196 BOOST_CHECK(layer->m_Bias != nullptr);
197
198 // free up the constants..
199 layer->ReleaseConstantData();
200
201 // check the constants that they are NULL now
202 BOOST_CHECK(layer->m_Weight == nullptr);
203 BOOST_CHECK(layer->m_Bias == nullptr);
204}
205
206BOOST_AUTO_TEST_SUITE_END()
207