//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>

#include <backendsCommon/test/QuantizeHelper.hpp>

#include <boost/test/unit_test.hpp>

#include <vector>

namespace
{

using namespace armnn;

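// Builds a minimal network of the form (Input, Constant) -> Addition -> Output,
// runs it on the given compute devices, and returns true if the computed output
// matches expectedOutputData. All tensors in the network share commonTensorInfo.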
template<typename T>
bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
                       const TensorInfo& commonTensorInfo,
                       const std::vector<T>& inputData,
                       const std::vector<T>& constantData,
                       const std::vector<T>& expectedOutputData)
{
    // Creates the runtime in which the test will run.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);
    IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData));
    IConnectableLayer* add = net->AddAdditionLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    constant->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensor info for each output slot in the network.
    input->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
    constant->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);

    // Optimizes the network for the given compute devices.
    IOptimizedNetworkPtr optNet = Optimize(*net, computeDevice, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output.
    std::vector<T> outputData(inputData.size());

    InputTensors inputTensors
    {
        {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    return outputData == expectedOutputData;
}

inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
{
    const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);

    return ConstantUsageTest(backends,
                             commonTensorInfo,
                             std::vector<float>{ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, // Input.
                             std::vector<float>{ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, // Const input.
                             std::vector<float>{ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }  // Expected output.
    );
}
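
// Example usage from a backend's unit test suite (the test case name and the
// choice of backend below are illustrative, not part of this header):
//
//     BOOST_AUTO_TEST_CASE(ConstantUsage)
//     {
//         BOOST_TEST(ConstantUsageFloat32Test({ armnn::Compute::CpuRef }));
//     }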

inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
{
    TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);

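    // The quantization parameters below (the scale is roughly 6/255) map the
    // test values, which span [1.0f, 7.0f], onto the full uint8 range [0, 255].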
    const float scale = 0.023529f;
    const int8_t offset = -43;

    commonTensorInfo.SetQuantizationScale(scale);
    commonTensorInfo.SetQuantizationOffset(offset);

    return ConstantUsageTest(backends,
                             commonTensorInfo,
                             QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input.
                             QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input.
                             QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f })  // Expected output.
    );
}

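// Runs a caller-supplied network end to end on the given backends: binds each
// entry of inputTensorData to the input layer with the matching id, optimizes,
// loads and executes the network, then BOOST_TESTs each output against the
// vector with the matching id in expectedOutputData.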
template<typename T>
void EndToEndLayerTestImpl(INetworkPtr network,
                           const std::map<int, std::vector<T>>& inputTensorData,
                           const std::map<int, std::vector<T>>& expectedOutputData,
                           std::vector<BackendId> backends)
{
    // Creates the runtime in which the test will run.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Optimizes the network for the given backends.
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    InputTensors inputTensors;
    inputTensors.reserve(inputTensorData.size());
    for (auto&& it : inputTensorData)
    {
        inputTensors.push_back({it.first,
                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
    }

    // outputStorage owns the buffers that the output tensors point into; it must
    // stay alive until the results have been checked.
    OutputTensors outputTensors;
    outputTensors.reserve(expectedOutputData.size());
    std::map<int, std::vector<T>> outputStorage;
    for (auto&& it : expectedOutputData)
    {
        std::vector<T> out(it.second.size());
        outputStorage.emplace(it.first, out);
        outputTensors.push_back({it.first,
                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
                                        outputStorage.at(it.first).data())});
    }

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    for (auto&& it : expectedOutputData)
    {
        std::vector<T> out = outputStorage.at(it.first);
        BOOST_TEST(it.second == out);
    }
}
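
// Illustrative caller (the network factory and the data are hypothetical):
//
//     INetworkPtr net = CreateAdditionNetwork(); // two inputs (ids 0, 1), one output (id 0)
//     std::map<int, std::vector<float>> inputs   = { { 0, { 1.f, 2.f } },
//                                                    { 1, { 3.f, 4.f } } };
//     std::map<int, std::vector<float>> expected = { { 0, { 4.f, 6.f } } };
//     EndToEndLayerTestImpl<float>(std::move(net), inputs, expected, backends);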

} // anonymous namespace