//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/ArmNN.hpp>

#include <iostream>

// A simple example application showing the use of memory management pre-importing of inputs and outputs. In this
// sample, the user's single input number is added to itself using an addition layer and printed to the console as
// a number that is double the input. The code does not use EnqueueWorkload but instead uses IRuntime::Execute.

int main()
{
    using namespace armnn;

    float number;
    std::cout << "Please enter a number: " << std::endl;
    std::cin >> number;

    // Turn on logging to standard output
    // This is useful in this sample so that users can learn more about what is going on
    armnn::ConfigureLogging(true, false, LogSeverity::Info);

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    armnn::NetworkId networkIdentifier1 = 0;

    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    auto addLayer = testNetwork->AddAdditionLayer("add layer");
    ARMNN_NO_DEPRECATE_WARN_END
    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");

    // Set the tensor info on the network's connections
    TensorInfo tensorInfo{{4}, armnn::DataType::Float32};

    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Set preferred backend to CpuRef
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    // Holds the error message if loading the network fails
    std::string er;

    // Enable asynchronous execution and set the input and output MemorySources to something other than
    // MemorySource::Undefined so that input and output tensors can be imported
    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);

    // Optimize and load the network into the runtime
    runtime->LoadNetwork(networkIdentifier1,
                         Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                         er,
                         networkProperties);
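    // Illustrative addition, not part of the original sample: LoadNetwork fills 'er' with an error
    // message when loading fails, so report the failure and exit early rather than executing a
    // network that never loaded. The armnn::Status returned by LoadNetwork could be checked instead.
    if (!er.empty())
    {
        std::cout << "Network failed to load: " << er << std::endl;
        return 1;
    }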

    // Create structures for input & output
    std::vector<float> inputData1(4, number);
    std::vector<float> inputData2(4, number);

    // Recent Arm NN releases expect the TensorInfo used to construct a ConstTensor to be marked as
    // constant, so the input tensors use a copy of tensorInfo with the constant flag set.
    TensorInfo inputTensorInfo(tensorInfo);
    inputTensorInfo.SetConstant(true);
    ConstTensor inputTensor1(inputTensorInfo, inputData1.data());
    ConstTensor inputTensor2(inputTensorInfo, inputData2.data());

    std::vector<float> outputData1(4);
    Tensor outputTensor1{tensorInfo, outputData1.data()};

    // ImportInputs separates the importing and mapping of InputTensors from network execution.
    // It allows a set of InputTensors to be imported and mapped once but used in execution many times.
    // ImportInputs is not thread safe and must not be used while other threads are calling Execute().
    // It is only compatible with networks that were loaded with asyncEnabled = true.

    // Pre-import the input tensors; ImportInputs returns the pre-imported ids
    // (the first refers to inputTensor1, the second to inputTensor2)
    std::vector<ImportedInputId> importedInputVec = runtime->ImportInputs(networkIdentifier1,
                                                                          {{0, inputTensor1}, {1, inputTensor2}});

    // Create a new unique WorkingMemHandle object. Create multiple handles if you wish to overlap
    // executions by calling Execute() from different threads, with each thread using its own handle.
    auto memHandle = runtime->CreateWorkingMemHandle(networkIdentifier1);
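    // For illustration only (not part of the original sample, and requiring <thread>), overlapped
    // execution could look roughly like the sketch below: each thread gets its own WorkingMemHandle
    // and its own output buffer, and Execute() is then safe to call concurrently.
    //
    //     auto memHandle2 = runtime->CreateWorkingMemHandle(networkIdentifier1);
    //     std::vector<float> outputData2(4);
    //     Tensor outputTensor2{tensorInfo, outputData2.data()};
    //     std::thread worker([&]() {
    //         runtime->Execute(*memHandle2, {{0, inputTensor1}, {1, inputTensor2}}, {{2, outputTensor2}});
    //     });
    //     runtime->Execute(*memHandle, {{0, inputTensor1}, {1, inputTensor2}}, {{2, outputTensor1}});
    //     worker.join();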

    // Execute evaluates a network using input in inputTensors and outputs filled into outputTensors.
    // This function performs a thread safe execution of the network. It returns once execution is complete
    // and will block until this and any other thread using the same working memory handle completes.
    // Execute with both input tensors supplied through the pre-imported ids in importedInputVec
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    runtime->Execute(*memHandle.get(), {}, {{2, outputTensor1}}, importedInputVec /* pre-imported ids */);
    ARMNN_NO_DEPRECATE_WARN_END

    // ImportOutputs separates the importing and mapping of OutputTensors from network execution.
    // It allows a set of OutputTensors to be imported and mapped once but used in execution many times.
    // This function is not thread safe and must not be used while other threads are calling Execute().
    // It is only compatible with networks that were loaded with asyncEnabled = true.

    // Pair outputTensor1 with its LayerBindingId
    std::pair<LayerBindingId, class Tensor> output1{2, outputTensor1};
    // Pre-import outputTensor1
    std::vector<ImportedOutputId> importedOutputVec = runtime->ImportOutputs(networkIdentifier1, {output1});
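    // Illustrative sketch, not part of the original sample: a pre-imported output id is consumed by
    // passing it in the trailing argument of Execute() instead of supplying the tensor in
    // outputTensors, for example:
    //
    //     runtime->Execute(*memHandle.get(), {{0, inputTensor1}, {1, inputTensor2}}, {}, {}, importedOutputVec);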

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    // Execute with Non-PreImported inputTensor1 as well as PreImported inputTensor2
    runtime->Execute(*memHandle.get(), {{0, inputTensor1}}, {{2, outputTensor1}}, {1 /* pre-imported id */});
    ARMNN_NO_DEPRECATE_WARN_END

    // Clear the previously pre-imported inputs using the network Id and the inputIds returned from ImportInputs()
    // Note: this also happens automatically in the armnn::LoadedNetwork destructor
    runtime->ClearImportedInputs(networkIdentifier1, importedInputVec);

    // Clear the previously pre-imported outputs using the network Id and the outputIds returned from ImportOutputs()
    // Note: this also happens automatically in the armnn::LoadedNetwork destructor
    runtime->ClearImportedOutputs(networkIdentifier1, importedOutputVec);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    // Execute with non-pre-imported inputTensor1, inputTensor2 and outputTensor1; the pre-imported ids
    // were cleared above, so all tensors are passed to Execute directly
    runtime->Execute(*memHandle.get(), {{0, inputTensor1}, {1, inputTensor2}}, {{2, outputTensor1}});
    ARMNN_NO_DEPRECATE_WARN_END

    std::cout << "Your number " << number << " doubled is " << outputData1[0] << std::endl;

    return 0;
}