//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "CaffeParser.hpp"
#include "RecordByRecordCaffeParser.hpp"

#include "armnn/Descriptors.hpp"
#include "armnn/INetwork.hpp"
#include "armnn/Utils.hpp"
#include "armnn/Exceptions.hpp"

#include "GraphTopologicalSort.hpp"
#include "VerificationHelpers.hpp"

#include <boost/numeric/conversion/cast.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>

// Caffe
#include "caffe/proto/caffe.pb.h"

// ProtoBuf
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>

#include <cmath>
#include <sstream>
#include <queue>
#include <fcntl.h>
/// Caffe networks are loaded from protobuf files (binary or text) using the protobuf library and the generated
/// code from caffe.pb.h. This gives us a caffe::NetParameter, which is an in-memory version of the file.
/// This contains a flat list of Caffe 'layers' (e.g. convolution, pooling etc.).
/// Each layer has inputs (called "bottoms") and outputs (called "tops"). Data flows from bottom to top.
/// The bottoms of a layer refer to the tops of other layers, not to the layers' names.
/// The names of layers seem to be arbitrary (you could rename a layer and the network wouldn't
/// need any other changes).
///
/// Some layers (e.g. Relu) can be configured so that their top and bottom are both the same. This is called an
/// "in-place" layer and is a Caffe runtime feature used to reduce memory usage by modifying tensors in-place.
/// This isn't relevant to the parser, so we preprocess these layers to convert them to regular layers and
/// obtain a consistent graph structure.
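///
/// For reference, a minimal sketch of how such a NetParameter can be loaded with the protobuf
/// headers included above (assuming the generated caffe.pb.h API; error handling elided):
///
///     caffe::NetParameter netParam;
///     std::ifstream binaryFile("model.caffemodel", std::ios::binary);
///     bool ok = netParam.ParseFromIstream(&binaryFile);                // binary format
///
///     std::ifstream textFile("deploy.prototxt");
///     google::protobuf::io::IstreamInputStream wrapped(&textFile);
///     ok = google::protobuf::TextFormat::Parse(&wrapped, &netParam);   // text format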

namespace armnnCaffeParser
{

using namespace armnn;
using namespace caffe;
using namespace std;
using namespace google::protobuf::io;

namespace
{

const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int blobIndex)
{
    auto nBlobs = layerParam.blobs_size();
    if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Expected data blob at index %1% in layer %2% not found. nBlobs=%3%. %4%") %
                    blobIndex %
                    layerParam.name() %
                    nBlobs %
                    CHECK_LOCATION().AsString()));
    }

    const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));

    const float* arrayPtr = blob.data().data();
    return arrayPtr;
}

void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex)
{
    auto nBlobs = layerParam.blobs_size();
    if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Expected data blob at index %1% in layer %2% not found. %3%") %
                    blobIndex %
                    layerParam.name() %
                    CHECK_LOCATION().AsString()));
    }

    const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));

    size_t blobSize = boost::numeric_cast<size_t>(blob.data_size());
    if (blobSize != outData.size())
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Data blob at index %1% in layer %2% has an unexpected size. "
                    "Expected %3% elements but got %4% elements. %5%") %
                    blobIndex %
                    layerParam.name() %
                    outData.size() %
                    blobSize %
                    CHECK_LOCATION().AsString()));
    }

    int outSizeInt = boost::numeric_cast<int>(outData.size());
    for (int i = 0; i < outSizeInt; ++i)
    {
        outData[static_cast<size_t>(i)] = blob.data(i);
    }
}

template <typename T>
size_t SizeOfVectorData(const vector<T>& vec)
{
    return vec.size() * sizeof(T);
}

void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
                              unsigned int numInputs,
                              unsigned int numOutputs)
{
    int numInputsActual = layerParameter.bottom_size();
    if (numInputs != boost::numeric_cast<unsigned int>(numInputsActual))
    {
        throw ParseException(
            boost::str(
                boost::format("Invalid number of inputs requested %1% for layer %2% "
                              "while only %3% present. %4%") %
                              numInputs %
                              layerParameter.name() %
                              numInputsActual %
                              CHECK_LOCATION().AsString()));
    }

    int numOutputsActual = layerParameter.top_size();
    if (numOutputs != boost::numeric_cast<unsigned int>(numOutputsActual))
    {
        throw ParseException(
            boost::str(
                boost::format("Invalid number of outputs requested %1% for layer %2% "
                              "while only %3% present. %4%") %
                              numOutputs %
                              layerParameter.name() %
                              numOutputsActual %
                              CHECK_LOCATION().AsString()));
    }
}

template <typename ParamType, typename ExtractOptional, typename ExtractFallback, typename ValueType>
ValueType GetOptionalWithFallback(const ParamType& param,
                                  ExtractOptional extractOptional,
                                  ExtractFallback extractFallback,
                                  ValueType defaultValue)
{
    auto optValue = extractOptional(param, defaultValue);
    if (optValue.first)
    {
        return optValue.second;
    }
    auto fallbackValue = extractFallback(param, defaultValue);
    return fallbackValue.second;
}

#define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, \
                                          PARAM_TYPE, \
                                          OPTIONAL_VALUE, \
                                          FALLBACK_VECTOR, \
                                          VALUE_TYPE, \
                                          DEFAULT_VALUE) \
    GetOptionalWithFallback( \
        PARAM, \
        [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
        { \
            if (param.has_##OPTIONAL_VALUE ()) \
            { \
                return std::make_pair(true, param.OPTIONAL_VALUE ()); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
        { \
            if (param.FALLBACK_VECTOR##_size() > 0) \
            { \
                return std::make_pair(true, (param.FALLBACK_VECTOR ()).Get(0)); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        DEFAULT_VALUE)

#define GET_OPTIONAL_WITH_FALLBACK(PARAM, \
                                   PARAM_TYPE, \
                                   OPTIONAL_VALUE, \
                                   FALLBACK_VALUE, \
                                   VALUE_TYPE, \
                                   DEFAULT_VALUE) \
    GetOptionalWithFallback( \
        PARAM, \
        [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
        { \
            if (param.has_##OPTIONAL_VALUE ()) \
            { \
                return std::make_pair(true, param.OPTIONAL_VALUE ()); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
        { \
            if (param.has_##FALLBACK_VALUE ()) \
            { \
                return std::make_pair(true, param.FALLBACK_VALUE ()); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        DEFAULT_VALUE)
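
// For example, the invocation used later in ParseConvLayer():
//
//     unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
//                                                              kernel_h, kernel_size, unsigned int, notFound);
//
// reads convParam.kernel_h() when has_kernel_h() is true, otherwise falls back to the first
// element of the repeated kernel_size field, and otherwise yields notFound.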

} // namespace <anonymous>

const std::map<std::string, CaffeParserBase::OperationParsingFunction>
    CaffeParserBase::ms_CaffeLayerNameToParsingFunctions = {
    { "Input",        &CaffeParserBase::ParseInputLayer },
    { "Convolution",  &CaffeParserBase::ParseConvLayer },
    { "Pooling",      &CaffeParserBase::ParsePoolingLayer },
    { "ReLU",         &CaffeParserBase::ParseReluLayer },
    { "LRN",          &CaffeParserBase::ParseLRNLayer },
    { "InnerProduct", &CaffeParserBase::ParseInnerProductLayer },
    { "Softmax",      &CaffeParserBase::ParseSoftmaxLayer },
    { "Eltwise",      &CaffeParserBase::ParseEltwiseLayer },
    { "Concat",       &CaffeParserBase::ParseConcatLayer },
    { "BatchNorm",    &CaffeParserBase::ParseBatchNormLayer },
    { "Scale",        &CaffeParserBase::ParseScaleLayer },
    { "Split",        &CaffeParserBase::ParseSplitLayer },
    { "Dropout",      &CaffeParserBase::ParseDropoutLayer },
};
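
// Layers are dispatched by looking up layerParam.type() in the map above; conceptually
// (a sketch only, assuming OperationParsingFunction is a pointer to a CaffeParserBase
// member function taking a LayerParameter):
//
//     auto it = ms_CaffeLayerNameToParsingFunctions.find(layerParam.type());
//     if (it == ms_CaffeLayerNameToParsingFunctions.end())
//     {
//         // unsupported layer type
//     }
//     (this->*(it->second))(layerParam);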

ICaffeParser* ICaffeParser::CreateRaw()
{
    return new RecordByRecordCaffeParser();
}

ICaffeParserPtr ICaffeParser::Create()
{
    return ICaffeParserPtr(CreateRaw(), &ICaffeParser::Destroy);
}

void ICaffeParser::Destroy(ICaffeParser* parser)
{
    delete parser;
}

CaffeParserBase::CaffeParserBase()
    : m_Network(nullptr, nullptr)
{
}

CaffeParser::CaffeParser()
    : CaffeParserBase()
{
}

BindingPointInfo CaffeParserBase::GetNetworkInputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
}

BindingPointInfo CaffeParserBase::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
}

std::pair<armnn::LayerBindingId, armnn::TensorInfo> CaffeParserBase::GetBindingInfo(
    const std::string& layerName,
    const char* bindingPointDesc,
    const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
{
    auto it = nameToBindingInfo.find(layerName);
    if (it == nameToBindingInfo.end())
    {
        throw InvalidArgumentException(
            boost::str(
                boost::format(
                    "Unknown binding %1% for layer '%2%'. %3%") %
                    bindingPointDesc %
                    layerName %
                    CHECK_LOCATION().AsString()));
    }
    return it->second;
}

TensorInfo CaffeParserBase::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const
{
    std::vector<unsigned int> shape;
    for (int j = 0; j < blobShape.dim_size(); ++j)
    {
        shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
    }

    return TensorInfo(boost::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
}

BlobShape TensorDescToBlobShape(const TensorInfo& desc)
{
    BlobShape ret;
    for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i)
    {
        ret.add_dim(i);
        ret.set_dim(boost::numeric_cast<int>(i), desc.GetShape()[i]);
    }

    return ret;
}

// Note: can move to CaffeParser when/if we optimise the text/string format
// to load on a layer-by-layer basis.
vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& layerParam)
{
    std::vector<const caffe::LayerParameter*> ret;
    ret.reserve(boost::numeric_cast<size_t>(layerParam.bottom_size()));
    for (int j = 0; j < layerParam.bottom_size(); ++j)
    {
        std::string inputName = layerParam.bottom(j);
        auto inputIt = m_CaffeLayersByTopName.find(inputName);
        if (inputIt == m_CaffeLayersByTopName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Can't find Caffe layer with top called '%1%', "
                        "which is listed as an input of '%2%'. %3%") %
                        inputName %
                        layerParam.name() %
                        CHECK_LOCATION().AsString()));
        }
        ret.push_back(inputIt->second);
    }

    return ret;
}

void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
{
    BOOST_ASSERT(layerParam.type() == "Input");
    ValidateNumInputsOutputs(layerParam, 0, 1);

    const InputParameter& param = layerParam.input_param();

    const armnn::LayerBindingId inputId = boost::numeric_cast<armnn::LayerBindingId>(
        m_NetworkInputsBindingInfo.size());
    armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str());

    // Decides the tensor info for this input. This can be specified in the Caffe network but can also
    // be overridden by user input (m_InputShapes).
    armnn::TensorInfo inputTensorInfo;

    const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ?
        &param.shape(0) : nullptr;
    if (originalShape)
    {
        inputTensorInfo = BlobShapeToTensorInfo(*originalShape);
    }

    auto overrideIt = m_InputShapes.find(layerParam.name());
    if (overrideIt != m_InputShapes.end())
    {
        const TensorShape& overrideShape = overrideIt->second;
        if (originalShape &&
            (   originalShape->dim(1) != overrideShape[1]
             || originalShape->dim(2) != overrideShape[2]
             || originalShape->dim(3) != overrideShape[3]))
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Parsed input shape for '%1%' is incompatible with the override provided. %2%") %
                        layerParam.name() %
                        CHECK_LOCATION().AsString()));
        }
        inputTensorInfo.SetShape(overrideShape);
    }
    else if (!originalShape)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No input descriptor given for '%1%' and no input shape found in caffe model. %2%") %
                    layerParam.name() %
                    CHECK_LOCATION().AsString()));
    }

    TrackInputBinding(inputLayer, inputId, inputTensorInfo);
    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), inputLayer->GetOutputSlot(0));
}

void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerParam,
                                             const armnn::Convolution2dDescriptor& desc,
                                             unsigned int kernelW,
                                             unsigned int kernelH)
{
    BOOST_ASSERT(layerParam.type() == "Convolution");
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
    const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;

    // Assume these were already verified by the calling ParseConvLayer() function.
    BOOST_ASSERT(numGroups < inputShape.dim(1));
    BOOST_ASSERT(numGroups > 1);

    // Handle grouping.
    armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));

    vector<string> convLayerNames(numGroups);
    vector<armnn::IConnectableLayer*> convLayers(numGroups);
    convLayerNames[0] = layerParam.name();

    // This convolution is to be applied to chunks of the input data, so add a splitter layer.

    // Redirect the convolution input to the splitter.
    unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)),
                                        static_cast<unsigned int>(inputShape.dim(1)),
                                        static_cast<unsigned int>(inputShape.dim(2)),
                                        static_cast<unsigned int>(inputShape.dim(3))};

    // Split dimension 1 of the splitter output shape and conv input shapes
    // according to the number of groups.

    splitterDimSizes[1] /= numGroups;
    inputShape.set_dim(1, splitterDimSizes[1]);
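
    // e.g. an NCHW input of (1, 256, 28, 28) with numGroups = 2 yields two views of
    // (1, 128, 28, 28), with view g starting at channel offset 128 * g.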

    // This is used to describe how the input is to be split.
    ViewsDescriptor splitterDesc(numGroups);

    // Create an output node for each group, giving each a unique name.
    for (unsigned int g = 0; g < numGroups; ++g)
    {
        // Work out the names of the splitter layer's child convolutions.
        stringstream ss;
        ss << layerParam.name() << "_" << g;
        convLayerNames[g] = ss.str();

        splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g);

        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < 4; dimIdx++)
        {
            splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
        }
    }

    const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
    armnn::IConnectableLayer* splitterLayer = m_Network->AddSplitterLayer(splitterDesc, splitterLayerName.c_str());

    inputConnection.Connect(splitterLayer->GetInputSlot(0));
    for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++)
    {
        splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape));
    }

    unsigned int numFilters = convParam.num_output();

    // Populates the convolution output tensor descriptor dimensions.
    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    // Ensures that dimension 1 of the convolution output is split according to the number of groups.
    outputShape.set_dim(1, numFilters / numGroups);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - kernelH) /
            static_cast<float>(desc.m_StrideY)) + 1));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - kernelW) /
            static_cast<float>(desc.m_StrideX)) + 1));

    // Load the weight data for ALL groups.
    vector<float> weightData(boost::numeric_cast<size_t>(numGroups *
                                                         inputShape.dim(1) *  // number of input channels
                                                         outputShape.dim(1) * // number of output channels
                                                         kernelH *
                                                         kernelW));
    GetDataFromBlob(layerParam, weightData, 0);

    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(outputShape.dim(1)),
        static_cast<unsigned int>(inputShape.dim(1)),
        kernelH,
        kernelW};

    TensorInfo biasInfo;
    vector<float> biasData;

    if (desc.m_BiasEnabled)
    {
        biasData.resize(boost::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
    }

    const unsigned int numWeightsPerGroup = boost::numeric_cast<unsigned int>(weightData.size()) / numGroups;
    const unsigned int numBiasesPerGroup  = boost::numeric_cast<unsigned int>(biasData.size()) / numGroups;

    for (unsigned int g = 0; g < numGroups; ++g)
    {
        // Sets the slot index: group 0 should be connected to the 0th output of the splitter,
        // group 1 to the 1st output of the splitter, and so on.

        // Pulls out the weights for this group from those loaded from the model file earlier.
        ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32),
                            weightData.data() + numWeightsPerGroup * g);

        IConnectableLayer* convLayer = nullptr;
        Optional<ConstTensor> optionalBiases;
        if (desc.m_BiasEnabled)
        {
            // Pulls out the biases for this group from those loaded from the model file earlier.
            ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
            optionalBiases = Optional<ConstTensor>(biases);
        }
        convLayer = m_Network->AddConvolution2dLayer(desc,
                                                     weights,
                                                     optionalBiases,
                                                     convLayerNames[g].c_str());
        convLayers[g] = convLayer;

        // If we have more than one group then the input to the nth convolution is the splitter layer's
        // nth output, otherwise it's the regular input to this layer.
        armnn::IOutputSlot& splitterInputConnection =
            splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
        splitterInputConnection.Connect(convLayer->GetInputSlot(0));
        convLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
    }

    // Since the convolution was performed in chunks, add a layer to concatenate the results.

    // The concat input shape matches that of the convolution output.
    unsigned int concatDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)),
                                      static_cast<unsigned int>(outputShape.dim(1)),
                                      static_cast<unsigned int>(outputShape.dim(2)),
                                      static_cast<unsigned int>(outputShape.dim(3))};

    // This is used to describe how the inputs are to be concatenated.
    OriginsDescriptor concatDesc(numGroups);

    // Now create an input node for each group, using the name from
    // the output of the corresponding convolution.
    for (unsigned int g = 0; g < numGroups; ++g)
    {
        concatDesc.SetViewOriginCoord(g, 1, concatDimSizes[1] * g);
    }

    // Make sure the output from the concat is the correct size to hold the data for all groups.
    concatDimSizes[1] *= numGroups;
    outputShape.set_dim(1, concatDimSizes[1]);

    // Finally add the concat layer.
    IConnectableLayer* concatLayer = m_Network->AddConcatLayer(concatDesc, layerParam.name().c_str());

    if (!concatLayer)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to create final concat layer for Split+Convolution+Concat. "
                    "Layer=%1% #groups=%2% #filters=%3% %4%") %
                    layerParam.name() %
                    numGroups %
                    numFilters %
                    CHECK_LOCATION().AsString()));
    }

    for (unsigned int g = 0; g < numGroups; ++g)
    {
        convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
    }
    concatLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, concatDimSizes, DataType::Float32));
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
}

void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam,
                                                    const armnn::Convolution2dDescriptor& convDesc,
                                                    unsigned int kernelW,
                                                    unsigned int kernelH)
{
    BOOST_ASSERT(layerParam.type() == "Convolution");
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());

    DepthwiseConvolution2dDescriptor desc;
    desc.m_PadLeft     = convDesc.m_PadLeft;
    desc.m_PadRight    = convDesc.m_PadRight;
    desc.m_PadTop      = convDesc.m_PadTop;
    desc.m_PadBottom   = convDesc.m_PadBottom;
    desc.m_StrideX     = convDesc.m_StrideX;
    desc.m_StrideY     = convDesc.m_StrideY;
    desc.m_BiasEnabled = convDesc.m_BiasEnabled;

    unsigned int numFilters = convParam.num_output();

    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - kernelH) /
            static_cast<float>(desc.m_StrideY)) + 1));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - kernelW) /
            static_cast<float>(desc.m_StrideX)) + 1));

    // Load the weight data.
    size_t allWeightsSize = boost::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
    vector<float> weightData(allWeightsSize);

    GetDataFromBlob(layerParam, weightData, 0);

    // The depth multiplier will be 1 for the depthwise convolution.
    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(1),                 // depth multiplier
        static_cast<unsigned int>(inputShape.dim(1)), // #channels
        kernelH,
        kernelW};

    armnn::IConnectableLayer* returnLayer = nullptr;
    ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
    Optional<ConstTensor> optionalBiases;
    vector<float> biasData;
    if (desc.m_BiasEnabled)
    {
        TensorInfo biasInfo;

        biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);

        ConstTensor biases(biasInfo, biasData.data());
        optionalBiases = Optional<ConstTensor>(biases);
    }
    returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                            weights,
                                                            optionalBiases,
                                                            layerParam.name().c_str());

    if (!returnLayer)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to create depthwise convolution layer. "
                    "Layer=%1% #filters=%2% %3%") %
                    layerParam.name() %
                    numFilters %
                    CHECK_LOCATION().AsString()));
    }
    armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
    inputConnection.Connect(returnLayer->GetInputSlot(0));
    returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
{
    // Ignored Caffe Parameters:
    // * Dilation Size
    // * Weight Filler
    // * Bias Filler
    // * Engine
    // * Force nd_im2col
    // * Axis

    // Not Available ArmNN Interface Parameters:
    // * Rounding policy

    BOOST_ASSERT(layerParam.type() == "Convolution");
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
    const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
    unsigned int numFilters = convParam.num_output();

    const auto notFound = std::numeric_limits<unsigned int>::max();

    unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             kernel_h, kernel_size, unsigned int, notFound);
    unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             kernel_w, kernel_size, unsigned int, notFound);

    unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             stride_h, stride, unsigned int, 1u);
    unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             stride_w, stride, unsigned int, 1u);

    unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                          pad_h, pad, unsigned int, 0u);
    unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                          pad_w, pad, unsigned int, 0u);

    Convolution2dDescriptor convolution2dDescriptor;
    convolution2dDescriptor.m_PadLeft     = padW;
    convolution2dDescriptor.m_PadRight    = padW;
    convolution2dDescriptor.m_PadTop      = padH;
    convolution2dDescriptor.m_PadBottom   = padH;
    convolution2dDescriptor.m_StrideX     = strideW;
    convolution2dDescriptor.m_StrideY     = strideH;
    convolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;

    if (numGroups > numFilters)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Error parsing Convolution: %1%. "
                    "The 'group'=%2% parameter cannot be larger than the "
                    "number of filters supplied ='%3%'. %4%") %
                    layerParam.name() %
                    numGroups %
                    numFilters %
                    CHECK_LOCATION().AsString()));
    }

    if (inputShape.dim_size() != 4)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Convolution input shape is expected to have 4 dimensions. "
                    "%1%'s input has only %2%. %3%") %
                    layerParam.name() %
                    inputShape.dim_size() %
                    CHECK_LOCATION().AsString()));
    }

    if (numGroups > 1)
    {
        if (numGroups > inputShape.dim(1))
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Error parsing Convolution: %1%. "
                        "The 'group'=%2% parameter cannot be larger than the "
                        "channel of the input shape=%3% (in NCHW format). %4%") %
                        layerParam.name() %
                        numGroups %
                        inputShape.dim(1) %
                        CHECK_LOCATION().AsString()));
        }
        else if (numGroups == inputShape.dim(1))
        {
            // We use a depthwise convolution here, because the number of groups equals the
            // number of input channels.
            AddConvLayerWithDepthwiseConv(layerParam, convolution2dDescriptor, kernelW, kernelH);
            return;
        }
        else
        {
            // We split the input by channels into channels/groups separate convolutions
            // and concatenate the results afterwards.
            AddConvLayerWithSplits(layerParam, convolution2dDescriptor, kernelW, kernelH);
            return;
        }
    }

    // NOTE: at this point we only need to handle the #groups=1 case; all other cases are
    // handled by the AddConvLayer* helpers above.

    // Populate the convolution output tensor descriptor dimensions.
    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            static_cast<float>(inputShape.dim(2) + 2 * padH - kernelH) /
            static_cast<float>(strideH)) + 1));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            static_cast<float>(inputShape.dim(3) + 2 * padW - kernelW) /
            static_cast<float>(strideW)) + 1));
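
    // e.g. a 224x224 input with kernel 7, stride 2 and pad 3 gives
    // floor((224 + 2*3 - 7) / 2) + 1 = 112 in each spatial dimension.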

    // Load the weight data for ALL groups.
    vector<float> weightData(boost::numeric_cast<size_t>(inputShape.dim(1) *
                                                         outputShape.dim(1) *
                                                         kernelH *
                                                         kernelW));
    GetDataFromBlob(layerParam, weightData, 0);

    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(outputShape.dim(1)), // output channels
        static_cast<unsigned int>(inputShape.dim(1)),  // input channels
        kernelH,
        kernelW};

    armnn::IConnectableLayer* returnLayer = nullptr;

    // Pull out the weights for this group from those loaded from the model file earlier.
    ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
    Optional<ConstTensor> optionalBiases;
    vector<float> biasData;
    if (convolution2dDescriptor.m_BiasEnabled)
    {
        TensorInfo biasInfo;

        biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);

        // Pull out the biases for this group from those loaded from the model file earlier.
        ConstTensor biases(biasInfo, biasData.data());
        optionalBiases = Optional<ConstTensor>(biases);
    }
    returnLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
                                                   weights,
                                                   optionalBiases,
                                                   layerParam.name().c_str());

    if (!returnLayer)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to create Convolution layer. "
                    "Layer=%1% #groups=%2% #filters=%3% %4%") %
                    layerParam.name() %
                    numGroups %
                    numFilters %
                    CHECK_LOCATION().AsString()));
    }

    armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
    inputConnection.Connect(returnLayer->GetInputSlot(0));
    returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));

    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParsePoolingLayer(const LayerParameter& layerParam)
{
    // Ignored Caffe Parameters:
    // * Stochastic Pooling
    // * Engine

    ValidateNumInputsOutputs(layerParam, 1, 1);
    PoolingParameter param = layerParam.pooling_param();
    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    const auto notFound = std::numeric_limits<unsigned int>::max();

    unsigned int kernel_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       kernel_h, kernel_size, unsigned int, notFound);
    unsigned int kernel_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       kernel_w, kernel_size, unsigned int, notFound);

    if ((kernel_h == notFound || kernel_w == notFound) && param.has_global_pooling())
    {
        kernel_h = inputInfo.GetShape()[2];
        kernel_w = inputInfo.GetShape()[3];
    }

    unsigned int stride_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       stride_h, stride, unsigned int, notFound);
    unsigned int stride_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       stride_w, stride, unsigned int, notFound);

    if ((stride_h == notFound || stride_w == notFound) && param.has_global_pooling())
    {
        stride_h = 1;
        stride_w = 1;
    }

    unsigned int pad_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                    pad_h, pad, unsigned int, 0u);
    unsigned int pad_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                    pad_w, pad, unsigned int, 0u);

    // Populate the pooling descriptor.
    Pooling2dDescriptor pooling2dDescriptor;
    if (param.has_pool())
    {
        PoolingParameter_PoolMethod p = param.pool();
        switch (p)
        {
            case PoolingParameter_PoolMethod_MAX:
            {
                pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
                break;
            }
            case PoolingParameter_PoolMethod_AVE:
            {
                pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
                break;
            }
            case PoolingParameter_PoolMethod_STOCHASTIC:
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "Pooling Layer: Stochastic Pooling Not Supported. Layer=%1% %2%") %
                            layerParam.name() %
                            CHECK_LOCATION().AsString()));
            }
            default:
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "Pooling Layer: unknown pooling method: %1% for layer: %2% %3%") %
                            p %
                            layerParam.name() %
                            CHECK_LOCATION().AsString()));
            }
        }
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "No Pooling Method Defined for %1% %2%") %
                    layerParam.name() %
                    CHECK_LOCATION().AsString()));
    }

    pooling2dDescriptor.m_PadLeft    = pad_w;
    pooling2dDescriptor.m_PadRight   = pad_w;
    pooling2dDescriptor.m_PadTop     = pad_h;
    pooling2dDescriptor.m_PadBottom  = pad_h;
    pooling2dDescriptor.m_StrideX    = stride_w;
    pooling2dDescriptor.m_StrideY    = stride_h;
    pooling2dDescriptor.m_PoolWidth  = kernel_w;
    pooling2dDescriptor.m_PoolHeight = kernel_h;

    pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling;
    pooling2dDescriptor.m_PaddingMethod       = PaddingMethod::IgnoreValue;

    armnn::IConnectableLayer* poolingLayer = m_Network->AddPooling2dLayer(pooling2dDescriptor,
                                                                          layerParam.name().c_str());

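    // With ceiling rounding, each spatial output dimension is
    // ceil((in + 2*pad - kernel) / stride) + 1; e.g. a 112x112 input with kernel 3,
    // stride 2 and pad 0 gives ceil((112 - 3) / 2) + 1 = 56.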
    TensorInfo outputInfo(
        { inputInfo.GetShape()[0],
          inputInfo.GetShape()[1],
          static_cast<unsigned int>(ceil(
              static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) /
              boost::numeric_cast<float>(stride_h))) + 1,
          static_cast<unsigned int>(ceil(
              static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) /
              boost::numeric_cast<float>(stride_w))) + 1 },
        DataType::Float32);

    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(poolingLayer->GetInputSlot(0));
    poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), poolingLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseReluLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    const string& name = layerParam.name();
    const ReLUParameter& param = layerParam.relu_param();

    ActivationDescriptor activationDescriptor;
    const float negativeSlope = param.negative_slope();
    if (negativeSlope == 0.0f)
    {
        activationDescriptor.m_Function = ActivationFunction::ReLu;
    }
    else
    {
        activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
        activationDescriptor.m_A = negativeSlope;
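        // LeakyReLu computes f(x) = x for x >= 0 and f(x) = m_A * x otherwise,
        // matching Caffe's ReLU with a non-zero negative_slope.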
    }

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
    IConnectableLayer* const activationLayer = m_Network->AddActivationLayer(activationDescriptor, name.c_str());
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), activationLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseLRNLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    LRNParameter param = layerParam.lrn_param();

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    // Ignored Caffe Parameters (from the BATCH NORMALIZATION, MVN and LRN messages):
    // * Engine

    NormalizationDescriptor normalizationDescriptor;
    if (param.has_norm_region())
    {
        LRNParameter_NormRegion n = param.norm_region();
        switch (n)
        {
            case LRNParameter_NormRegion_ACROSS_CHANNELS:
            {
                normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
                break;
            }
            case LRNParameter_NormRegion_WITHIN_CHANNEL:
            {
                normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
                break;
            }
            default:
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "Unknown region %1% for LRN layer %2% %3%") %
                            n %
                            layerParam.name() %
                            CHECK_LOCATION().AsString()));
            }
        }
    }
    else
    {
        // Caffe defaults to normalization across channels.
        normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    }

    normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
    if (param.has_local_size())
    {
        normalizationDescriptor.m_NormSize = param.local_size();
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "local_size not defined for LRN layer %1% %2%") %
                    layerParam.name() %
                    CHECK_LOCATION().AsString()));
    }

    if (param.has_alpha())
    {
        normalizationDescriptor.m_Alpha = param.alpha();
        normalizationDescriptor.m_Alpha /= boost::numeric_cast<float>(param.local_size());
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Alpha not defined for LRN layer %1% %2%") %
                    layerParam.name() %
                    CHECK_LOCATION().AsString()));
    }
    if (param.has_beta())
    {
        normalizationDescriptor.m_Beta = param.beta();
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Beta not defined for LRN layer %1% %2%") %
                    layerParam.name() %
                    CHECK_LOCATION().AsString()));
    }

    if (param.has_k())
    {
        normalizationDescriptor.m_K = param.k();
    }
    else
    {
        normalizationDescriptor.m_K = 1;
    }

    IConnectableLayer* const normLayer = m_Network->AddNormalizationLayer(normalizationDescriptor,
                                                                          layerParam.name().c_str());
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(normLayer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), normLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseInnerProductLayer(const LayerParameter& layerParam)
{
    InnerProductParameter param = layerParam.inner_product_param();

    ValidateNumInputsOutputs(layerParam, 1, 1);

    unsigned int outputSize = param.num_output();

    // Ignored Caffe Parameters:
    // * Weight Filler
    // * Bias Filler
    // * Engine
    // * Axis

    FullyConnectedDescriptor tensorFullyConnectedDescriptor;

    if (param.has_transpose())
    {
        // If true, assumes transposed weights.
        tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = param.transpose();
    }
    else
    {
        // Caffe defaults to transposed.
        tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = true;
    }

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    TensorInfo weightInfo;
    TensorInfo biasInfo;

    // Allows implicit flattening of extra dimensions.
    unsigned int inputSize = inputInfo.GetShape()[1];
    for (unsigned int i = 2; i < inputInfo.GetNumDimensions(); ++i)
    {
        inputSize *= inputInfo.GetShape()[i];
    }
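    // e.g. an input of shape (N, 512, 7, 7) is treated as (N, 512 * 7 * 7) = (N, 25088).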

    const float* weightDataPtr = GetArrayPtrFromBlob(layerParam, 0);
    const unsigned int swTD[2] = { outputSize, inputSize };
    ConstTensor weights(TensorInfo(2, swTD, DataType::Float32), weightDataPtr);

    tensorFullyConnectedDescriptor.m_BiasEnabled = true;
    // Todo: check whether bias enabled.
    armnn::IConnectableLayer* fullyConnectedLayer = nullptr;
    if (tensorFullyConnectedDescriptor.m_BiasEnabled)
    {
        // BIAS VALUE
        const float* biasDataPtr = GetArrayPtrFromBlob(layerParam, 1);

        const unsigned int sbTD[1] = { outputSize };

        ConstTensor biases(TensorInfo(1, sbTD, DataType::Float32), biasDataPtr);

        fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
                                                                weights,
                                                                Optional<ConstTensor>(biases),
                                                                layerParam.name().c_str());
    }
    else
    {
        fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
                                                                weights,
                                                                EmptyOptional(),
                                                                layerParam.name().c_str());
    }

    TensorInfo outputInfo({ inputInfo.GetShape()[0], outputSize }, DataType::Float32);
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(fullyConnectedLayer->GetInputSlot(0));
    fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), fullyConnectedLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseSoftmaxLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    SoftmaxParameter param = layerParam.softmax_param();

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    // Ignored Caffe Parameters:
    // * axis
    // * Engine

    armnn::SoftmaxDescriptor softmaxDescriptor;
    softmaxDescriptor.m_Axis = 1;
    armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(
        softmaxDescriptor,
        layerParam.name().c_str());
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(softmaxLayer->GetInputSlot(0));
    softmaxLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), softmaxLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseEltwiseLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 2, 1);

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    // Ignored Caffe Parameters:
    // * coeff

    EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM; // Defaults to sum as per Caffe.

    if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation())
    {
        operation = layerParam.eltwise_param().operation();
    }

    armnn::IConnectableLayer* newLayer = nullptr;
    switch (operation)
    {
        case EltwiseParameter_EltwiseOp_SUM:
        {
            newLayer = m_Network->AddAdditionLayer(layerParam.name().c_str());
            break;
        }
        case EltwiseParameter_EltwiseOp_PROD:
        {
            newLayer = m_Network->AddMultiplicationLayer(layerParam.name().c_str());
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unsupported operation %1% in Eltwise layer %2% %3%") %
                        operation %
                        layerParam.name() %
                        CHECK_LOCATION().AsString()));
        }
    }

    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(newLayer->GetInputSlot(0));
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(1)).Connect(newLayer->GetInputSlot(1));
    newLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), newLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
{
    unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size());
    // We assume concatenation happens along the channel dimension, which is 1 in (0, 1, 2, 3).
    unsigned int concatDim = 1;
    unsigned int numOfDims = 4;

    // We only consider 4-D tensors here.
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);
    std::vector<unsigned int> mergeDimSizes(numOfDims, 0u);

    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
    {
        const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
            layerParam.bottom(boost::numeric_cast<int>(viewIndex))).GetTensorInfo();
        // Checks whether the dimensions of the input tensors are actually 4.
        if (inputInfo.GetNumDimensions() != 4)
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "The number of dimensions for input tensors of "
                        "the concatenation op should be 4. Inputs of %1% have "
                        "%2% dimensions. %3%") %
                        layerParam.name() %
                        inputInfo.GetNumDimensions() %
                        CHECK_LOCATION().AsString()));
        }

        mergeDimSizes[0] = inputInfo.GetShape()[0];
        mergeDimSizes[1] = inputInfo.GetShape()[1];
        mergeDimSizes[2] = inputInfo.GetShape()[2];
        mergeDimSizes[3] = inputInfo.GetShape()[3];

        for (unsigned int j = 0; j < concatDim; ++j)
        {
            concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
        }

        concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
        mergeDim += mergeDimSizes[concatDim];

        for (unsigned int j = concatDim + 1; j < numOfDims; ++j)
        {
            concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
        }
    }
    mergeDimSizes[concatDim] = mergeDim;
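
    // e.g. concatenating two inputs with 64 and 32 channels places their views at
    // channel offsets 0 and 64 respectively, giving an output with 96 channels.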

    armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i)));
        outputSlot.Connect(concatlayer->GetInputSlot(i));
    }

    concatlayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(numOfDims, mergeDimSizes.data(), DataType::Float32));
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatlayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseBatchNormLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    string name = layerParam.name();

    BatchNormParameter param = layerParam.batch_norm_param();
    // If use_global_stats is not explicitly set in the model, assume it to be true (its default value
    // when the network is in the testing phase).
    if (param.has_use_global_stats())
    {
        if (!param.use_global_stats())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Error parsing Batch Norm layer '%1%': "
                        "Parameter 'use_global_stats' is set to false, which is "
                        "unsupported (value used for training). %2%") %
                    name %
                    CHECK_LOCATION().AsString()));
        }
    }

    BatchNormalizationDescriptor desc;
    desc.m_Eps = param.eps();

    unsigned int channels = inputInfo.GetShape()[1];
    unsigned int shape[] = {channels};

    vector<float> meanData(channels);
    GetDataFromBlob(layerParam, meanData, 0);

    vector<float> varianceData(channels);
    GetDataFromBlob(layerParam, varianceData, 1);

    // Reads the moving average factor and applies scaling (if required).
    const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
    const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
    if (movingAverageFactor != 0.0f)
    {
        const float scaleFactor = 1.0f / movingAverageFactor;
        auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };

        std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
        std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
    }
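
    // Background (Caffe's BatchNorm blob convention, noted here for clarity):
    // blobs 0 and 1 hold the running mean and variance scaled by an accumulated
    // moving-average factor, which is stored in blob 2. Dividing by that factor
    // recovers the true statistics, e.g. (illustrative) with a stored factor of
    // 0.999 each entry is multiplied by 1/0.999 ~= 1.001.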

    // Caffe's BatchNorm layer has no learned scale or shift, so use identity
    // beta (0) and gamma (1); any scaling is handled by a separate Scale layer.
    vector<float> betaData(channels, 0.0f);
    vector<float> gammaData(channels, 1.0f);
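
    // With these constants the layer computes, per channel (the standard batch
    // normalization formula):
    //   y = gamma * (x - mean) / sqrt(variance + eps) + beta
    //     = (x - mean) / sqrt(variance + eps)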

    ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
    ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
    ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
    ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);

    armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
        mean, variance, beta, gamma, name.c_str());
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
    batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseScaleLayer(const LayerParameter& layerParam)
{
    // Current suboptimal solution: add a batch normalization layer with 0 mean and 1 variance.
    ValidateNumInputsOutputs(layerParam, 1, 1);

    const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();

    string name = layerParam.name();

    ScaleParameter param = layerParam.scale_param();
    if (param.axis() != 1)
    {
        // Would have to use something other than BatchNormalizationLayer in this case.
        throw ParseException(
            boost::str(
                boost::format(
                    "Loading Scale Layer: Only axis 1 is supported currently. "
                    "Layer=%1% Axis=%2% %3%") %
                layerParam.name() %
                param.axis() %
                CHECK_LOCATION().AsString()));
    }

    unsigned int channels = inputInfo.GetShape()[1];
    unsigned int shape[] = {channels};

    BatchNormalizationDescriptor desc;
    desc.m_Eps = 0.0f; // Don't need epsilon if variance is 1.
    vector<float> meanData(channels, 0.0f);
    vector<float> varianceData(channels, 1.0f);
    vector<float> betaData(channels, 0.0f);
    vector<float> gammaData(channels);

    GetDataFromBlob(layerParam, gammaData, 0);

    // bias_term defaults to false in Caffe's ScaleParameter, so only read the
    // beta blob when the bias is actually enabled.
    if (param.bias_term())
    {
        GetDataFromBlob(layerParam, betaData, 1);
    }
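
    // With mean = 0, variance = 1 and eps = 0 the batch normalization formula
    //   y = gamma * (x - mean) / sqrt(variance + eps) + beta
    // reduces to y = gamma * x + beta, which is exactly Caffe's per-channel
    // Scale (with optional bias) operation.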

    ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
    ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
    ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
    ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);

    armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
        mean, variance, beta, gamma, name.c_str());
    GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
    batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
}

void CaffeParserBase::ParseSplitLayer(const caffe::LayerParameter& layerParam)
{
    // Used in Caffe to duplicate memory; not necessary in ArmNN.
    if (layerParam.bottom_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Split layer '%1%' should have exactly 1 bottom. "
                    "#bottoms=%2% %3%") %
                layerParam.name() %
                layerParam.bottom_size() %
                CHECK_LOCATION().AsString()));
    }
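    // e.g. (illustrative): a Split with bottom "conv1" and tops "conv1_a" and
    // "conv1_b" simply registers the ArmNN output slot of "conv1" under both
    // top names; no layer is added to the network.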
    armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
    for (int i = 0; i < layerParam.top_size(); ++i)
    {
        SetArmnnOutputSlotForCaffeTop(layerParam.top(i), outputSlot);
    }
}

void CaffeParserBase::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
{
    // Ignored for inference, so patch the single input to its single output.
    if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dropout layer '%1%' should have exactly 1 bottom and 1 top. "
                    "#bottoms=%2% #tops=%3% %4%") %
                layerParam.name() %
                layerParam.bottom_size() %
                layerParam.top_size() %
                CHECK_LOCATION().AsString()));
    }
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
}

void CaffeParserBase::TrackInputBinding(armnn::IConnectableLayer* layer,
    armnn::LayerBindingId id,
    const armnn::TensorInfo& tensorInfo)
{
    TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkInputsBindingInfo);
}

void CaffeParserBase::TrackOutputBinding(armnn::IConnectableLayer* layer,
    armnn::LayerBindingId id,
    const armnn::TensorInfo& tensorInfo)
{
    TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkOutputsBindingInfo);
}

void CaffeParserBase::TrackBindingPoint(armnn::IConnectableLayer* layer,
    armnn::LayerBindingId id,
    const armnn::TensorInfo& tensorInfo,
    const char* bindingPointDesc,
    std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
{
    const std::string layerName = layer->GetName();
    auto it = nameToBindingInfo.find(layerName);
    if (it == nameToBindingInfo.end())
    {
        nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Trying to register a second binding point (id %1%) for '%2%' %3%") %
                id %
                bindingPointDesc %
                CHECK_LOCATION().AsString()));
    }
}

armnn::IOutputSlot& CaffeParserBase::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const
{
    auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
    if (it != m_ArmnnOutputSlotForCaffeTop.end())
    {
        return *it->second;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Could not find armnn output slot for Caffe top '%1%' %2%") %
                caffeTopName %
                CHECK_LOCATION().AsString()));
    }
}

void CaffeParserBase::SetArmnnOutputSlotForCaffeTop(
    const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot)
{
    auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
    if (it == m_ArmnnOutputSlotForCaffeTop.end())
    {
        m_ArmnnOutputSlotForCaffeTop[caffeTopName] = &armnnOutputSlot;
    }
    else
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Attempting to add duplicate entry for Caffe top '%1%' %2%") %
                caffeTopName %
                CHECK_LOCATION().AsString()));
    }
}

// Note: can move to CaffeParser when/if we optimise the text/string format
// to load on a layer-by-layer basis.
void CaffeParserBase::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
{
    // Finds layers with the same top.
    std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
    for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
    {
        caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx);
        for (int i = 0; i < layer.top_size(); ++i)
        {
            layersByTop[layer.top(i)].push_back(&layer);
        }
    }

    // For each set of layers with the same top, resolves them to a linear chain rather than in-place layers.
    // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op.
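    //
    // Example (illustrative): a ReLU running in-place on "conv1"
    //   conv1: bottom="data",  top="conv1"
    //   relu1: bottom="conv1", top="conv1"
    // is rewritten as the linear chain
    //   conv1: bottom="data",      top="conv1_top"
    //   relu1: bottom="conv1_top", top="conv1"
    // so later layers that reference "conv1" still resolve correctly.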
    for (const auto& layersWithSameTopIt : layersByTop)
    {
        const std::string& top = layersWithSameTopIt.first;
        const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second;

        // Chains the layers together in the order that they are listed in the prototxt,
        // which is assumed to match their execution order.
        // Note that the last layer will not have its top modified so that other layers will continue to reference it.
        for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
        {
            caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx];
            caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx + 1];
            if (layer1.top_size() != 1)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "Node '%1%' is an in-place layer but doesn't have exactly one "
                            "top. It has %2% instead. %3%") %
                        layer1.name() %
                        layer1.top_size() %
                        CHECK_LOCATION().AsString()));
            }
            std::string newTop = layer1.name() + "_top";
            layer1.set_top(0, newTop);
            if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
            {
                throw ParseException(
                    boost::str(
                        boost::format(
                            "Node '%1%' is an in-place layer but "
                            "doesn't have exactly one bottom, or it doesn't match its top. "
                            "#bottoms=%2%, first bottom is %3%, top is %4% %5%") %
                        layer2.name() %
                        layer2.bottom_size() %
                        layer2.bottom(0) %
                        top %
                        CHECK_LOCATION().AsString()));
            }
            layer2.set_bottom(0, newTop);
        }
    }
}

// Note: can move to CaffeParser when/if we optimise the text/string format
// to load on a layer-by-layer basis.
void CaffeParserBase::LoadNetParam(NetParameter& netParameter)
{
    // Caffe models sometimes have an implicit input layer.
    // In that case, add an explicit one.
    if (netParameter.input_size() > 0)
    {
        LayerParameter* newLayer = netParameter.add_layer();

        newLayer->set_type("Input");
        newLayer->set_name(netParameter.input(0));
        newLayer->add_top(netParameter.input(0));

        InputParameter* inputParam = newLayer->mutable_input_param();
        BlobShape* shape = inputParam->add_shape();

        int dimCount = netParameter.input_dim_size();
        for (int i = 0; i < dimCount; ++i)
        {
            shape->add_dim(netParameter.input_dim(i));
        }
    }
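
    // For example (illustrative prototxt), the implicit form
    //   input: "data"
    //   input_dim: 1
    //   input_dim: 3
    //   input_dim: 224
    //   input_dim: 224
    // is rewritten as an explicit layer:
    //   layer { name: "data" type: "Input" top: "data"
    //           input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } } }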

    // Replaces in-place layers with regular ones to make the rest of the parsing easier.
    ResolveInPlaceLayers(netParameter);

    // Creates a lookup of Caffe layers by top name.
    for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
    {
        const caffe::LayerParameter& layer = netParameter.layer(layerIdx);
        for (int i = 0; i < layer.top_size(); ++i)
        {
            m_CaffeLayersByTopName[layer.top(i)] = &layer;
        }
    }

    // Finds the output layers the user requested.
    std::vector<const caffe::LayerParameter*> targetLayers;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
        if (nodeIt == m_CaffeLayersByTopName.end())
        {
            throw ParseException(
                boost::str(
                    boost::format(
                        "Couldn't find requested output layer '%1%' in graph %2%") %
                    requestedOutputName %
                    CHECK_LOCATION().AsString()));
        }
        targetLayers.push_back(nodeIt->second);
    }

    // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
    std::vector<const caffe::LayerParameter*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>(
        targetLayers,
        [this](const caffe::LayerParameter* node)
        {
            return GetInputs(*node);
        },
        sortedNodes))
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Cycle detected in graph. #nodes: %1% %2%") %
                sortedNodes.size() %
                CHECK_LOCATION().AsString()));
    }

    // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
    for (const caffe::LayerParameter* current : sortedNodes)
    {
        auto it = ms_CaffeLayerNameToParsingFunctions.find(current->type());
        if (it == ms_CaffeLayerNameToParsingFunctions.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("Unsupported layer type: '%1%' for layer %2% %3%") %
                    current->type() %
                    current->name() %
                    CHECK_LOCATION().AsString()));
        }
        auto func = it->second;
        (this->*func)(*current);
    }

    // Adds ArmNN output layers connected to each requested output.
    for (const std::string& requestedOutput : m_RequestedOutputs)
    {
        armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);

        const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
            m_NetworkOutputsBindingInfo.size());
        armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
        outputSlot.Connect(outputLayer->GetInputSlot(0));

        TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
    }
}

INetworkPtr CaffeParserBase::CreateNetworkFromTextFile(const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "r");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Failed to open graph file: %1% %2%") %
                graphFile %
                CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    NetParameter netParam;
    auto input = new google::protobuf::io::FileInputStream(fileno(fd));
    bool success = google::protobuf::TextFormat::Parse(input, &netParam);
    delete input;
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse graph file: %1% %2%") %
                graphFile %
                CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
}
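
// Minimal usage sketch (illustrative only; the file name, input shape and output
// name are assumptions, not part of this file):
//
//   armnnCaffeParser::ICaffeParserPtr parser = armnnCaffeParser::ICaffeParser::Create();
//   std::map<std::string, armnn::TensorShape> inputShapes{ {"data", {1, 3, 224, 224}} };
//   armnn::INetworkPtr network = parser->CreateNetworkFromTextFile(
//       "model.prototxt", inputShapes, {"prob"});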

INetworkPtr CaffeParserBase::CreateNetworkFromString(const char* protoText,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    // Parses the string into a message.
    NetParameter netParam;
    bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse graph string. %1%") %
                CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
}

INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            boost::str(
                boost::format(
                    "Failed to open graph file at: %1% %2%") %
                graphFile %
                CHECK_LOCATION().AsString()));
    }

    // Parses the file into a message.
    NetParameter netParam;

    FileInputStream inStream(fileno(fd));
    CodedInputStream codedStream(&inStream);
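    // Raise protobuf's default 64 MB limit on coded-stream message size (and its
    // warning threshold) so that large .caffemodel files can be read.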
    codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
    bool success = netParam.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Failed to parse protobuf file: %1% %2%") %
                graphFile %
                CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
}

// Note: can move to CaffeParser when/if we optimise the text/string format
// to load on a layer-by-layer basis.
INetworkPtr CaffeParserBase::CreateNetworkFromNetParameter(NetParameter& netParam,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    m_Network = INetwork::Create();

    m_InputShapes = inputShapes;
    if (requestedOutputs.empty())
    {
        throw ParseException("requestedOutputs must have at least one entry");
    }
    m_RequestedOutputs = requestedOutputs;

    try
    {
        LoadNetParam(netParam);
    }
    catch (const ParseException&)
    {
        Cleanup();
        throw; // Rethrow without copying to preserve the original exception.
    }

    Cleanup();

    return std::move(m_Network);
}

void CaffeParserBase::Cleanup()
{
    // Cleanup, in case we reuse this parser.
    m_InputShapes.clear();
    m_RequestedOutputs.clear();
    m_ArmnnOutputSlotForCaffeTop.clear();
    // NOTE: when we get the text/string format
    // optimised for memory then this data structure can
    // also move to the CaffeParser class.
    m_CaffeLayersByTopName.clear();
}

} // namespace armnnCaffeParser