//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
#include <Profiling.hpp>

#include <backendsCommon/test/QuantizeHelper.hpp>

#include <boost/test/unit_test.hpp>

#include <map>
#include <sstream>
#include <string>
#include <vector>

namespace
{

using namespace armnn;

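// Builds a minimal network in which a constant tensor is added to the input, runs it on the
// given backends and returns true if the output matches expectedOutputData exactly.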
template<typename T>
bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
                       const TensorInfo& commonTensorInfo,
                       const std::vector<T>& inputData,
                       const std::vector<T>& constantData,
                       const std::vector<T>& expectedOutputData)
{
    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);
    IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData));
    IConnectableLayer* add = net->AddAdditionLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    constant->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    input->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
    constant->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, computeDevice, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output.
    std::vector<T> outputData(inputData.size());

    InputTensors inputTensors
    {
        {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    return outputData == expectedOutputData;
}

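// Runs ConstantUsageTest with Float32 tensors.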
inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
{
    const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);

    return ConstantUsageTest(backends,
                             commonTensorInfo,
                             std::vector<float>{ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, // Input.
                             std::vector<float>{ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, // Const input.
                             std::vector<float>{ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }  // Expected output.
    );
}

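// Runs ConstantUsageTest with quantised asymmetric 8-bit tensors sharing a single scale and offset.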
inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
{
    TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);

    const float scale = 0.023529f;
    const int8_t offset = -43;

    commonTensorInfo.SetQuantizationScale(scale);
    commonTensorInfo.SetQuantizationOffset(offset);

    return ConstantUsageTest(backends,
                             commonTensorInfo,
                             QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input.
                             QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input.
                             QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f })  // Expected output.
    );
}

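// Compares two values as booleans: any non-zero value is treated as true.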
template<typename T>
bool CompareBoolean(T a, T b)
{
    return (a == 0 && b == 0) || (a != 0 && b != 0);
}

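// Optimizes and loads the given network, feeds it the supplied inputs (keyed by layer binding
// id), runs inference and checks each output against the expected data, using CompareBoolean
// for DataType::Boolean outputs and a small tolerance otherwise.
//
// Illustrative sketch only (the network and values below are placeholders, not part of this
// header) of how a backend end-to-end test might call it:
//
//     std::map<int, std::vector<float>> inputData      = { { 0, { 1.f, 2.f } } };
//     std::map<int, std::vector<float>> expectedOutput = { { 0, { 2.f, 4.f } } };
//     EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(
//         std::move(network), inputData, expectedOutput, backends);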
template<DataType ArmnnIType, DataType ArmnnOType,
         typename TInput = ResolveType<ArmnnIType>, typename TOutput = ResolveType<ArmnnOType>>
void EndToEndLayerTestImpl(INetworkPtr network,
                           const std::map<int, std::vector<TInput>>& inputTensorData,
                           const std::map<int, std::vector<TOutput>>& expectedOutputData,
                           std::vector<BackendId> backends)
{
    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    InputTensors inputTensors;
    inputTensors.reserve(inputTensorData.size());
    for (auto&& it : inputTensorData)
    {
        inputTensors.push_back({it.first,
                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
    }
    OutputTensors outputTensors;
    outputTensors.reserve(expectedOutputData.size());
    std::map<int, std::vector<TOutput>> outputStorage;
    for (auto&& it : expectedOutputData)
    {
        std::vector<TOutput> out(it.second.size());
        outputStorage.emplace(it.first, out);
        outputTensors.push_back({it.first,
                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
                                        outputStorage.at(it.first).data())});
    }

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    for (auto&& it : expectedOutputData)
    {
        std::vector<TOutput> out = outputStorage.at(it.first);
        if (ArmnnOType == DataType::Boolean)
        {
            for (unsigned int i = 0; i < out.size(); ++i)
            {
                BOOST_TEST(CompareBoolean<TOutput>(it.second[i], out[i]));
            }
        }
        else
        {
            for (unsigned int i = 0; i < out.size(); ++i)
            {
                BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
            }
        }
    }
}

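// Checks that when the network is loaded with importing enabled and the input tensor is backed
// by a misaligned pointer, EnqueueWorkload throws a MemoryImportException.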
inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
{
    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Build up the structure of the network
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    NormalizationDescriptor descriptor;
    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    std::string ignoredErrorMessage;
    // Enable Importing
    INetworkProperties networkProperties(true, true);
    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);

    // Creates structures for input & output. The extra element keeps the misaligned pointer
    // below within the bounds of the buffer for all four tensor elements.
    std::vector<float> inputData
    {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f
    };

    // Misaligned input
    float* misalignedInputData = reinterpret_cast<float*>(reinterpret_cast<char*>(inputData.data()) + 1);

    std::vector<float> outputData(5);

    // Aligned output
    float* alignedOutputData = outputData.data();

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), alignedOutputData)}
    };

    // The result of the inference is not important; this test only checks that the
    // misaligned input pointer is rejected.
    runtime->GetProfiler(netId)->EnableProfiling(true);

    // Do the inference and expect it to fail with a MemoryImportException
    BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
}

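// Checks that when the network is loaded with importing/exporting enabled and the output tensor
// is backed by a misaligned pointer, EnqueueWorkload throws a MemoryExportException.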
inline void ImportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
{
    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Build up the structure of the network
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    NormalizationDescriptor descriptor;
    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    std::string ignoredErrorMessage;
    // Enable Importing
    INetworkProperties networkProperties(true, true);
    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);

    // Creates structures for input & output
    std::vector<float> inputData
    {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f
    };

    // Aligned input
    float* alignedInputData = inputData.data();

    // The extra element keeps the misaligned pointer below within the bounds of the buffer.
    std::vector<float> outputData(5);

    // Misaligned output
    float* misalignedOutputData = reinterpret_cast<float*>(reinterpret_cast<char*>(outputData.data()) + 1);

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), alignedInputData)},
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputData)}
    };

    // The result of the inference is not important; this test only checks that the
    // misaligned output pointer is rejected.
    runtime->GetProfiler(netId)->EnableProfiling(true);

    // Do the inference and expect it to fail with a MemoryExportException
    BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
}

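// Checks that with importing enabled and properly aligned input/output pointers the inference
// runs without memory copies: the profiler dump should contain the normalization workload and
// SyncMemGeneric, but no CopyMemGeneric.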
inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
{
    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Build up the structure of the network
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    NormalizationDescriptor descriptor;
    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    std::string ignoredErrorMessage;
    // Enable Importing
    INetworkProperties networkProperties(true, true);
    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);

    // Creates structures for input & output
    std::vector<float> inputData
    {
        1.0f, 2.0f, 3.0f, 4.0f
    };

    std::vector<float> outputData(4);

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // The result of the inference is not important, just the fact that there
    // should not be CopyMemGeneric workloads.
    runtime->GetProfiler(netId)->EnableProfiling(true);

    // Do the inference
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Retrieve the Profiler.Print() output to get the workload execution
    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
    std::stringstream ss;
    profilerManager.GetProfiler()->Print(ss);
    std::string dump = ss.str();

    // Contains RefNormalizationWorkload
    std::size_t found = dump.find("RefNormalizationWorkload");
    BOOST_TEST(found != std::string::npos);
    // Contains SyncMemGeneric
    found = dump.find("SyncMemGeneric");
    BOOST_TEST(found != std::string::npos);
    // Does not contain CopyMemGeneric
    found = dump.find("CopyMemGeneric");
    BOOST_TEST(found == std::string::npos);
}

} // anonymous namespace