//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Layer.hpp"

#include "Graph.hpp"
#include <backends/WorkloadData.hpp>
#include <backends/CpuTensorHandle.hpp>

#include <boost/cast.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>

#include <numeric>

namespace armnn
{

void InputSlot::Insert(Layer& layer)
{
    BOOST_ASSERT(layer.GetNumOutputSlots() == 1);

    OutputSlot* const prevSlot = GetConnectedOutputSlot();

    if (prevSlot != nullptr)
    {
        // Disconnects parent from this.
        prevSlot->Disconnect(*this);

        // Connects inserted layer to parent.
        BOOST_ASSERT(layer.GetNumInputSlots() == 1);
        prevSlot->Connect(layer.GetInputSlot(0));

        // Sets tensor info for inserted layer.
        const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
        layer.GetOutputHandler().SetTensorInfo(tensorInfo);
    }

    // Connects inserted layer to this.
    layer.GetOutputSlot(0).Connect(*this);
}
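
// Usage sketch (illustrative only, the layer names are hypothetical):
// Insert() splices a single-input, single-output layer in front of this slot:
//     targetLayer.GetInputSlot(0).Insert(convertLayer);
// After the call convertLayer's output feeds this slot, whatever previously
// fed this slot now feeds convertLayer, and convertLayer's output handler
// inherits the tensor info of the original connection.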

const InputSlot* OutputSlot::GetConnection(unsigned int index) const
{
    ValidateConnectionIndex(index);
    return m_Connections[index];
}

InputSlot* OutputSlot::GetConnection(unsigned int index)
{
    ValidateConnectionIndex(index);
    return m_Connections[index];
}

void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
{
    GetOutputHandler().SetTensorInfo(tensorInfo);
}

const TensorInfo& OutputSlot::GetTensorInfo() const
{
    return GetOutputHandler().GetTensorInfo();
}

bool OutputSlot::IsTensorInfoSet() const
{
    return GetOutputHandler().IsTensorInfoSet();
}

bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
{
    BOOST_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
    return shape == m_OutputHandler.GetTensorInfo().GetShape();
}

int OutputSlot::Connect(InputSlot& destination)
{
    destination.SetConnection(this);
    m_Connections.push_back(&destination);
    return boost::numeric_cast<int>(m_Connections.size() - 1);
}

void OutputSlot::Disconnect(InputSlot& slot)
{
    slot.SetConnection(nullptr);
    m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
}
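
// Note: Connect() returns the zero-based index of the new connection within
// m_Connections (e.g. 0 for the first consumer of this output). Disconnect()
// erases entries from the same vector, so indices handed out by earlier
// Connect() calls are not stable once connections have been removed.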

void OutputSlot::DisconnectAll()
{
    while (GetNumConnections() > 0)
    {
        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
    }
}

void OutputSlot::MoveAllConnections(OutputSlot& destination)
{
    while (GetNumConnections() > 0)
    {
        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
        destination.Connect(connection);
    }
}
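
// Both loops above repeatedly take connection 0 rather than iterating over
// m_Connections, because Disconnect() mutates that vector and would
// invalidate any iterator held across the call.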

void OutputSlot::ValidateConnectionIndex(unsigned int index) const
{
    if (boost::numeric_cast<std::size_t>(index) >= m_Connections.size())
    {
        throw InvalidArgumentException(
            boost::str(boost::format("GetConnection: Invalid index %1% provided") % index));
    }
}

namespace {
LayerGuid GenerateLayerGuid()
{
    // Note: Not thread safe.
    static LayerGuid newGuid = 0;
    return newGuid++;
}
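
// A minimal thread-safe alternative (a sketch only, not used by this file)
// would replace the plain counter with an atomic one:
//     static std::atomic<LayerGuid> nextGuid { 0 };
//     return nextGuid++;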
} // namespace

Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             DataLayout layout,
             const char* name)
: m_OutputHandlers(numOutputSlots)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_DataLayout(layout)
, m_BackendId(UninitializedBackendId())
, m_Guid(GenerateLayerGuid())
{
    m_InputSlots.reserve(numInputSlots);
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        m_InputSlots.emplace_back(*this, i);
    }

    m_OutputSlots.reserve(numOutputSlots);
    for (unsigned int i = 0; i < numOutputSlots; ++i)
    {
        m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
    }
}
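
// Note: each InputSlot is constructed with its owning layer and its slot
// index, while each OutputSlot is bound to the OutputHandler at the same
// index, so output slot i and output handler i always describe the same
// output tensor.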

Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             const char* name)
: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
{
}

void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const
{
    for (auto&& inputSlot : GetInputSlots())
    {
        // The graph must be well-formed at this point.
        BOOST_ASSERT(inputSlot.GetConnection());
        const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
        dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
    }
}

void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const
{
    for (auto&& outputHandler : m_OutputHandlers)
    {
        outputHandler.CollectWorkloadOutputs(dataCollector);
    }
}

void Layer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
{
    for (auto&& outputHandler : m_OutputHandlers)
    {
        outputHandler.CreateTensorHandles(factory);
    }
}

void Layer::ReleaseConstantData()
{
    // Now free up the static data.
    OperateOnConstantTensors([](std::unique_ptr<ScopedCpuTensorHandle>& handle)
    {
        handle.reset(nullptr);
    });
}
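
// Note: OperateOnConstantTensors() visits the constant tensors (e.g. weights
// and biases) owned by derived layers; resetting each handle here frees that
// data, which is typically safe once the backend workloads have been created
// and the original tensors are no longer needed.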

DataType Layer::GetDataType() const
{
    if (GetNumInputSlots() > 0) // Ignore the input layer.
    {
        return GetInputSlot(0).GetConnection()->GetTensorInfo().GetDataType();
    }
    return GetOutputSlot(0).GetTensorInfo().GetDataType();
}

void Layer::ResetPriority() const
{
    m_Priority = 0;
    m_Visiting = false;
}

LayerPriority Layer::GetPriority() const
{
    constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
    constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();

    if (GetType() == LayerType::Input)
    {
        m_Priority = inputPrio;
    }
    else if (GetType() == LayerType::Output)
    {
        m_Priority = outputPrio;
    }
    else if (m_Priority == 0)
    {
        if (m_Visiting)
        {
            throw GraphValidationException("Graph has circular dependencies: cannot walk");
        }

        auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
        {
            const Layer& input = slot.GetConnectedOutputSlot()->GetOwningLayer();
            return std::max(prio, input.GetPriority());
        };

        m_Visiting = true;
        LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
        m_Visiting = false;

        if (parentPrio >= outputPrio)
        {
            throw GraphValidationException("Graph has too many edges");
        }

        m_Priority = parentPrio + 1U;
    }

    return m_Priority;
}
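
// Worked example: for a chain Input -> Convolution2d -> Activation -> Output,
// GetPriority() yields the lowest representable value for the Input layer,
// 1 for Convolution2d, 2 for Activation, and the maximum value for Output;
// i.e. each intermediate layer sits one step above the highest of its
// parents, which gives a topological ordering of the graph.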

void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
    BOOST_ASSERT(GetNumInputSlots() == expectedConnections);

    for (unsigned int i = 0; i < expectedConnections; ++i)
    {
        if (GetInputSlot(i).GetConnection() == nullptr)
        {
            throw LayerValidationException(
                boost::str(
                    boost::format(
                        "Input connection #%1% must be connected "
                        "for %2% layer %3% %4%")
                    % i
                    % GetLayerTypeAsCString(this->GetType())
                    % GetNameStr()
                    % location.AsString()));
        }
        if (!GetInputSlot(i).GetConnection()->IsTensorInfoSet())
        {
            throw LayerValidationException(
                boost::str(
                    boost::format(
                        "TensorInfo of Input connection #%1% must be set on connected OutputSlot for "
                        "%2% layer %3% %4%")
                    % i
                    % GetLayerTypeAsCString(this->GetType())
                    % GetNameStr()
                    % location.AsString()));
        }
    }
}

std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    BOOST_ASSERT(GetNumInputSlots() != 0);
    BOOST_ASSERT(GetNumOutputSlots() != 0);

    // By default we return what we got, meaning the output shape(s) are the same as the input(s).
    // This only works if the numbers of inputs and outputs are the same. Since we are in the Layer
    // base class, this means the implementation needs to be overridden in the specific layers for
    // the other cases. So the missing implementation justifies the UnimplementedException.

    if (GetNumInputSlots() != GetNumOutputSlots())
    {
        throw UnimplementedException(
            boost::str(
                boost::format(
                    "Default implementation for InferOutputShapes can only be used for "
                    "layers with the same number of input and output slots. This doesn't "
                    "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
                % GetLayerTypeAsCString(this->GetType())
                % GetNameStr()
                % GetNumInputSlots()
                % GetNumOutputSlots()
                % CHECK_LOCATION().AsString()));
    }
    return inputShapes;
}
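
// For example, an element-wise layer such as Activation produces an output
// with the same shape as its input, so this default suffices; a layer such
// as Convolution2d computes a different output shape and must override
// InferOutputShapes() instead.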

} // namespace armnn