//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Layer.hpp"

#include "Graph.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <boost/cast.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>

#include <numeric>

namespace armnn
{

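// Splices 'layer' between this input slot and its current source: the inserted
// layer takes over the existing connection (if any), inherits its tensor info,
// and becomes the new source of this slot.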
void InputSlot::Insert(Layer& layer)
{
    BOOST_ASSERT(layer.GetNumOutputSlots() == 1);

    OutputSlot* const prevSlot = GetConnectedOutputSlot();

    if (prevSlot != nullptr)
    {
        // Disconnects parent from this.
        prevSlot->Disconnect(*this);

        // Connects inserted layer to parent.
        BOOST_ASSERT(layer.GetNumInputSlots() == 1);
        prevSlot->Connect(layer.GetInputSlot(0));

        // Sets tensor info for inserted layer.
        const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
        layer.GetOutputHandler().SetTensorInfo(tensorInfo);
    }

    // Connects inserted layer to this.
    layer.GetOutputSlot(0).Connect(*this);
}
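
// A minimal splice sketch; 'childLayer' and 'convertLayer' are illustrative
// names, not defined in this file. The inserted layer must have exactly one
// input and one output slot, as asserted above:
//
//     childLayer.GetInputSlot(0).Insert(convertLayer);
//     // convertLayer now sits between childLayer's previous source (if any)
//     // and childLayer, with its output tensor info copied from that source.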

const InputSlot* OutputSlot::GetConnection(unsigned int index) const
{
    ValidateConnectionIndex(index);
    return m_Connections[index];
}

InputSlot* OutputSlot::GetConnection(unsigned int index)
{
    ValidateConnectionIndex(index);
    return m_Connections[index];
}

void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
{
    GetOutputHandler().SetTensorInfo(tensorInfo);
}

const TensorInfo& OutputSlot::GetTensorInfo() const
{
    return GetOutputHandler().GetTensorInfo();
}

bool OutputSlot::IsTensorInfoSet() const
{
    return GetOutputHandler().IsTensorInfoSet();
}

bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
{
    BOOST_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
    return shape == m_OutputHandler.GetTensorInfo().GetShape();
}

int OutputSlot::Connect(InputSlot& destination)
{
    destination.SetConnection(this);
    m_Connections.push_back(&destination);
    return boost::numeric_cast<int>(m_Connections.size() - 1);
}

void OutputSlot::Disconnect(InputSlot& slot)
{
    slot.SetConnection(nullptr);
    m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
}

void OutputSlot::DisconnectAll()
{
    while (GetNumConnections() > 0)
    {
        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
    }
}

void OutputSlot::MoveAllConnections(OutputSlot& destination)
{
    while (GetNumConnections() > 0)
    {
        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
        destination.Connect(connection);
    }
}
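
// A short usage sketch of the connection API above; 'producer', 'consumer' and
// 'newProducer' are illustrative names, not defined in this file:
//
//     int idx = producer.GetOutputSlot(0).Connect(consumer.GetInputSlot(0));
//     // idx is the position of the new connection within m_Connections.
//     producer.GetOutputSlot(0).MoveAllConnections(newProducer.GetOutputSlot(0));
//     // producer's slot is now unconnected; newProducer feeds consumer instead.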

void OutputSlot::ValidateConnectionIndex(unsigned int index) const
{
    if (boost::numeric_cast<std::size_t>(index) >= m_Connections.size())
    {
        throw InvalidArgumentException(
            boost::str(boost::format("GetConnection: Invalid index %1% provided") % index));
    }
}

namespace {
LayerGuid GenerateLayerGuid()
{
    // Note: Not thread safe.
    static LayerGuid newGuid = 0;
    return newGuid++;
}
} // namespace

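// All input and output slots are created up front; each output slot is bound
// to the output handler at the same index for the lifetime of the layer.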
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             DataLayout layout,
             const char* name)
: m_OutputHandlers(numOutputSlots)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_BackendId(UninitializedBackendId())
, m_Guid(GenerateLayerGuid())
{
    m_InputSlots.reserve(numInputSlots);
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        m_InputSlots.emplace_back(*this, i);
    }

    m_OutputSlots.reserve(numOutputSlots);
    for (unsigned int i = 0; i < numOutputSlots; ++i)
    {
        m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
    }
}

Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             const char* name)
: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
{
}

void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const
{
    for (auto&& inputSlot : GetInputSlots())
    {
        // The graph must be well-formed at this point.
        BOOST_ASSERT(inputSlot.GetConnection());
        const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
        dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
    }
}

void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const
{
    for (auto&& outputHandler : m_OutputHandlers)
    {
        outputHandler.CollectWorkloadOutputs(dataCollector);
    }
}

void Layer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
{
    for (auto&& outputHandler : m_OutputHandlers)
    {
        outputHandler.CreateTensorHandles(factory);
    }
}

void Layer::ReleaseConstantData()
{
    // Now free up the static data.
    OperateOnConstantTensors([](std::unique_ptr<ScopedCpuTensorHandle>& handle)
    {
        handle.reset(nullptr);
    });
}

DataType Layer::GetDataType() const
{
    if (GetNumInputSlots() > 0) // Input layers have no input slots, so they fall through below.
    {
        return GetInputSlot(0).GetConnection()->GetTensorInfo().GetDataType();
    }
    return GetOutputSlot(0).GetTensorInfo().GetDataType();
}

void Layer::ResetPriority() const
{
    m_Priority = 0;
    m_Visiting = false;
}

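// Lazily computes the layer's priority: input layers get the lowest value,
// output layers the highest, and any other layer gets one more than the maximum
// priority of its parents. The result is memoised in m_Priority, and m_Visiting
// guards the recursion so that cycles are reported rather than looping forever.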
LayerPriority Layer::GetPriority() const
{
    constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
    constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();

    if (GetType() == LayerType::Input)
    {
        m_Priority = inputPrio;
    }
    else if (GetType() == LayerType::Output)
    {
        m_Priority = outputPrio;
    }
    else if (m_Priority == 0)
    {
        if (m_Visiting)
        {
            throw GraphValidationException("Graph has circular dependencies: cannot walk");
        }

        auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
        {
            const Layer& input = slot.GetConnectedOutputSlot()->GetOwningLayer();
            return std::max(prio, input.GetPriority());
        };

        m_Visiting = true;
        LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
        m_Visiting = false;

        if (parentPrio >= outputPrio)
        {
            throw GraphValidationException("Graph has too many edges");
        }

        m_Priority = parentPrio + 1U;
    }

    return m_Priority;
}

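// Checks that every input slot is connected and that each connection already
// carries tensor info, throwing LayerValidationException with the offending
// slot index, layer type, name and source location otherwise.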
void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
    BOOST_ASSERT(GetNumInputSlots() == expectedConnections);

    for (unsigned int i = 0; i < expectedConnections; ++i)
    {
        if (GetInputSlot(i).GetConnection() == nullptr)
        {
            throw LayerValidationException(
                boost::str(
                    boost::format(
                        "Input connection #%1% must be connected "
                        "for %2% layer %3% %4%")
                    % i
                    % GetLayerTypeAsCString(this->GetType())
                    % GetNameStr()
                    % location.AsString()));
        }
        if (!GetInputSlot(i).GetConnection()->IsTensorInfoSet())
        {
            throw LayerValidationException(
                boost::str(
                    boost::format(
                        "TensorInfo of Input connection #%1% must be set on connected OutputSlot for "
                        "%2% layer %3% %4%")
                    % i
                    % GetLayerTypeAsCString(this->GetType())
                    % GetNameStr()
                    % location.AsString()));
        }
    }
}

std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    BOOST_ASSERT(GetNumInputSlots() != 0);
    BOOST_ASSERT(GetNumOutputSlots() != 0);

    // By default we return what we got: the output shape(s) are the same as the input shape(s).
    // This only works when the number of inputs equals the number of outputs. Since this is the
    // Layer base class, specific layers must override this implementation for the other cases,
    // which is why the missing implementation justifies the UnimplementedException below.
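    // For example, an element-wise operation such as an activation takes one input
    // and produces one output of the same shape, so the default below suffices;
    // layers that merge or split tensors must provide their own InferOutputShapes.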

    if (GetNumInputSlots() != GetNumOutputSlots())
    {
        throw UnimplementedException(
            boost::str(
                boost::format(
                    "Default implementation for InferOutputShapes can only be used for "
                    "layers with the same number of input and output slots. This doesn't "
                    "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
                % GetLayerTypeAsCString(this->GetType())
                % GetNameStr()
                % GetNumInputSlots()
                % GetNumOutputSlots()
                % CHECK_LOCATION().AsString()));
    }
    return inputShapes;
}

} // namespace armnn