//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "SubGraphSelector.hpp"
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <sstream>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}
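
// Typical lifetime (an illustrative sketch): prefer the smart-pointer factory
// over CreateRaw(), so that INetwork::Destroy is invoked automatically.
//
//     armnn::INetworkPtr network = armnn::INetwork::Create();
//     // ... build the graph via network->Add*Layer(...) ...
//     // network's custom deleter calls INetwork::Destroy on scope exit.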

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}
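
// Example (illustrative; the file name is an assumption): dump the optimized
// graph in Graphviz dot format.
//
//     std::ofstream dotStream("network.dot");
//     optimizedNetwork->SerializeToDot(dotStream);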

struct OptimizationResult
{
    bool m_Warning;
    bool m_Error;

    OptimizationResult()
        : m_Warning(false)
        , m_Error(false)
    {}
};

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    BOOST_LOG_TRIVIAL(error) << fullErrorMessage.str();
    if (errorMessages)
    {
        errorMessages.value().push_back(fullErrorMessage.str());
    }
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    BOOST_LOG_TRIVIAL(warning) << fullWarningMessage.str();
    if (warningMessages)
    {
        warningMessages.value().push_back(fullWarningMessage.str());
    }
}

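// Usage sketch (illustrative): callers may pass a vector to collect messages in
// addition to the Boost log output, or EmptyOptional() to log only.
//
//     std::vector<std::string> messages;
//     ReportWarning("falling back to CpuRef", Optional<std::vector<std::string>&>(messages));
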
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType())
        {
            if (0.f == info.GetQuantizationScale())
            {
                noErrors = false;
                std::stringstream ss;
                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                ReportError(ss.str(), errMessages);
            }
        }
    }
    return noErrors;
}

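// The check above catches QuantisedAsymm8 outputs whose TensorInfo never had a
// scale set. A well-formed quantized output is configured like this (the shape
// and quantization values are illustrative assumptions):
//
//     armnn::TensorInfo info({ 1, 16 }, armnn::DataType::QuantisedAsymm8);
//     info.SetQuantizationScale(0.05f);
//     info.SetQuantizationOffset(128);
//     layer->GetOutputSlot(0).SetTensorInfo(info);
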
OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    OptimizationResult result;

    // Helper lambda to compose a meaningful error message before returning with error
    auto ReturnWithError = [&](const Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendSettings.m_PreferredBackends;
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    };

    auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ReportError(failureMsg.str(), errMessages);

        result.m_Error = true;
        return result;
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        auto layer = *it;
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            result.m_Error = true;
        }

        for (const auto& backend : availablePreferredBackends)
        {
            // Need to set the compute device on the layer
            // before we can check if it is supported
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                ReportWarning(warningMsg.str(), errMessages);
            }
            else
            {
                found = true;
                backendSettings.m_SelectedBackends.insert(backend);
                break;
            }
        }

        // If the layer is unsupported by any backend, log the failure and return with
        // error, unless the layer can safely fall back to CpuRef.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            // fallback we should set the compute device on the layer to CpuRef (these are not
            // available as accelerated operations, or are only available under certain
            // conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
                                                    layerType == armnn::LayerType::Constant ||
                                                    layerType == armnn::LayerType::Permute))
            {
                BackendId cpuBackendId(armnn::Compute::CpuRef);
                layer->SetBackendId(cpuBackendId);
                backendSettings.m_SelectedBackends.insert(cpuBackendId);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    return result;
}

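// Call pattern (a sketch of how Optimize() below drives this function):
//
//     Graph::Iterator first = optNetObjPtr->GetGraph().begin();
//     Graph::Iterator last  = optNetObjPtr->GetGraph().end();
//     OptimizationResult res = AssignBackends(optNetObjPtr, backendSettings, first, last, errMessages);
//     if (res.m_Error) { /* abandon optimization */ }
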
OptimizationResult InsertPreCompiledLayers(OptimizedNetwork* optNetObjPtr,
                                           const IBackendInternalUniquePtr& backendObjPtr,
                                           BackendSettings& backendSettings,
                                           Optional<std::vector<std::string>&> errMessages)
{
    BOOST_ASSERT(backendObjPtr);

    OptimizationResult result;

    // Select sub-graphs based on backend
    SubGraphSelector::SubGraphs subGraphs =
        SubGraphSelector::SelectSubGraphs(optNetObjPtr->GetGraph(),
                                          // select layers assigned to requested backend
                                          [&](const Layer& layer)
                                          {
                                              return layer.GetType() != LayerType::Input &&
                                                     layer.GetType() != LayerType::Output &&
                                                     layer.GetBackendId() == backendObjPtr->GetId();
                                          });

    if (subGraphs.empty())
    {
        // No sub-graphs found -> return with no error
        return result;
    }

    // Convert sub-graphs and substitute them with pre-compiled layers
    unsigned int index = 0u;
    for (auto& subGraph : subGraphs)
    {
        // Create a pre-compiled layer
        PreCompiledLayer* preCompiledLayer = CreatePreCompiledLayer(optNetObjPtr->GetGraph(),
                                                                    *subGraph,
                                                                    index++,
                                                                    backendObjPtr);
        if (preCompiledLayer)
        {
            // Substitute sub-graph with pre-compiled layer in graph
            optNetObjPtr->GetGraph().SubstituteSubGraph(std::move(subGraph), preCompiledLayer);
        }
        else
        {
            // Failed to create a pre-compiled layer from the sub-graph ->
            // re-assign its layers to other available backends
            std::stringstream warningMsg;
            warningMsg << "Sub-graph #" << index << " failed to compile on "
                       << backendObjPtr->GetId() << ". Re-assigning backends to "
                       << subGraph->GetLayers().size() << " layers inside sub-graph";
            ReportWarning(warningMsg.str(), errMessages);

            backendSettings.m_IgnoredBackends = { backendObjPtr->GetId() };

            Graph::Iterator firstLayer = subGraph->begin();
            Graph::Iterator lastLayer = subGraph->end();
            OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
                                                                   backendSettings,
                                                                   firstLayer,
                                                                   lastLayer,
                                                                   errMessages);

            if (reassignmentResult.m_Error)
            {
                result.m_Error = true;
                return result;
            }
        }
    }

    return result;
}

IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // Initialize backend settings
    BackendSettings backendSettings(backendPreferences, deviceSpec);
    if (backendSettings.GetAvailablePreferredBackends().empty())
    {
        std::stringstream failureMsg;
        failureMsg << "None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << backendSettings.m_SupportedBackends;
        ReportError(failureMsg.str(), errMessages);
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Assign an available backend to each layer
    Graph::Iterator firstLayer = optNetObjPtr->GetGraph().begin();
    Graph::Iterator lastLayer = optNetObjPtr->GetGraph().end();
    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
                                                             backendSettings,
                                                             firstLayer,
                                                             lastLayer,
                                                             errMessages);
    if (assignBackendsResult.m_Error)
    {
        // Failed to assign a backend to each layer
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    // Insert pre-compiled layers where required by the backend
    // TODO: This is a dummy/default backend id used for making the code build until
    // we've properly refactored the optimizer
    const BackendId backendId(Compute::Undefined);
    auto const& backendRegistry = BackendRegistryInstance();
    if (backendRegistry.IsBackendRegistered(backendId))
    {
        // Obtain a backend object using the registered factory
        auto backendFactory = backendRegistry.GetFactory(backendId);
        auto backendObjPtr = backendFactory();

        OptimizationResult insertPreCompiledLayersResult = InsertPreCompiledLayers(optNetObjPtr,
                                                                                   backendObjPtr,
                                                                                   backendSettings,
                                                                                   errMessages);
        if (insertPreCompiledLayersResult.m_Error)
        {
            // Failed to insert pre-compiled layers
            return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
        }
    }

    // If the debug flag is set, then insert a DebugLayer after each layer.
    // NOTE: This optimization can only happen strictly after the PreCompiled layers have
    // already been inserted
    if (options.m_Debug)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer()));
    }

    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend-specific optimizations
    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}

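// Example invocation of Optimize() (an illustrative sketch; the runtime setup
// shown here is an assumption, not part of this translation unit):
//
//     std::vector<armnn::BackendId> preferences = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
//     std::vector<std::string> messages;
//     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
//                                                          preferences,
//                                                          runtime->GetDeviceSpec(),
//                                                          armnn::OptimizerOptions(),
//                                                          armnn::Optional<std::vector<std::string>&>(messages));
//     if (!optNet) { /* inspect messages: no preferred backend could run the network */ }
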
Network::Network()
    : m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}

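// Example (an illustrative sketch; the shapes and the weightData/biasData
// pointers are assumptions made for the example):
//
//     armnn::FullyConnectedDescriptor fcDesc;
//     fcDesc.m_BiasEnabled = true;
//     armnn::ConstTensor weights(armnn::TensorInfo({ 8, 16 }, armnn::DataType::Float32), weightData);
//     armnn::ConstTensor biases (armnn::TensorInfo({ 8 },     armnn::DataType::Float32), biasData);
//     armnn::IConnectableLayer* fc = network->AddFullyConnectedLayer(fcDesc, weights, biases, "fc1");
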
IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor,
                                                                      name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                              const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                               const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                                  const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                             const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
                                           const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor& mean,
                                                       const ConstTensor& variance,
                                                       const ConstTensor& beta,
                                                       const ConstTensor& gamma,
                                                       const char* name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

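// Example (an illustrative sketch; each statistics tensor is typically
// per-channel, i.e. shape { numChannels }, and the data pointers are assumptions):
//
//     armnn::BatchNormalizationDescriptor bnDesc;
//     bnDesc.m_Eps = 1e-5f;
//     armnn::TensorInfo statsInfo({ numChannels }, armnn::DataType::Float32);
//     armnn::IConnectableLayer* bn = network->AddBatchNormalizationLayer(
//         bnDesc,
//         armnn::ConstTensor(statsInfo, meanData),
//         armnn::ConstTensor(statsInfo, varianceData),
//         armnn::ConstTensor(statsInfo, betaData),
//         armnn::ConstTensor(statsInfo, gammaData),
//         "batchNorm");
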
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor& descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // Lstm basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // Lstm Cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // Lstm projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // Lstm peephole params
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}

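// Parameter requirements at a glance, as enforced by the checks above:
//   - m_CifgEnabled == false -> m_InputToInputWeights, m_RecurrentToInputWeights
//     and m_InputGateBias must all be set (m_CellToInputWeights may be null).
//   - m_ProjectionEnabled    -> m_ProjectionWeights must be set (m_ProjectionBias is optional).
//   - m_PeepholeEnabled      -> m_CellToForgetWeights and m_CellToOutputWeights must be set.
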
IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

IConnectableLayer* Network::AddGreaterLayer(const char* name)
{
    return m_Graph->AddLayer<GreaterLayer>(name);
}

IConnectableLayer* Network::AddEqualLayer(const char* name)
{
    return m_Graph->AddLayer<EqualLayer>(name);
}

IConnectableLayer* Network::AddRsqrtLayer(const char* name)
{
    return m_Graph->AddLayer<RsqrtLayer>(name);
}

IConnectableLayer* Network::AddGatherLayer(const char* name)
{
    return m_Graph->AddLayer<GatherLayer>(name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn