blob: dbc270e0c918f85b11fe0e19a4e9b17ce96bac32 [file] [log] [blame]
Mike Kelly3ec30772023-03-08 13:47:17 +00001//
Tracy Narinee7d27852024-01-26 09:13:19 +00002// Copyright © 2023-2024 Arm Ltd and contributors. All rights reserved.
Mike Kelly3ec30772023-03-08 13:47:17 +00003// SPDX-License-Identifier: MIT
4//
5#pragma once
6
#include "CommonTestUtils.hpp"

#include <ResolveType.hpp>

#include <armnn/INetwork.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <doctest/doctest.h>

#include <cstdint>
#include <map>
#include <stdexcept>
#include <vector>
17
18namespace
19{
20
21template<armnn::DataType ArmnnTypeInput>
22INetworkPtr CreateElementwiseBinaryNetwork(const TensorShape& input1Shape,
23 const TensorShape& input2Shape,
24 const TensorShape& outputShape,
25 BinaryOperation operation,
26 const float qScale = 1.0f,
27 const int32_t qOffset = 0)
28{
29 using namespace armnn;
30
31 INetworkPtr net(INetwork::Create());
32
33 TensorInfo input1TensorInfo(input1Shape, ArmnnTypeInput, qScale, qOffset, true);
34 TensorInfo input2TensorInfo(input2Shape, ArmnnTypeInput, qScale, qOffset, true);
35 TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
36
37 IConnectableLayer* input1 = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
38 IConnectableLayer* input2 = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(1));
39 IConnectableLayer* elementwiseBinaryLayer = net->AddElementwiseBinaryLayer(operation, "elementwiseUnary");
40 IConnectableLayer* output = net->AddOutputLayer(0, "output");
41
42 Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
43 Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
44 Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
45
46 return net;
47}
48
49template<armnn::DataType ArmnnInType,
50 typename TInput = armnn::ResolveType<ArmnnInType>>
51void ElementwiseBinarySimpleEndToEnd(const std::vector<BackendId>& backends,
52 BinaryOperation operation)
53{
54 using namespace armnn;
55
56 const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
57 const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0;
58
59 const TensorShape& input1Shape = { 2, 2, 2, 2 };
60 const TensorShape& input2Shape = { 1 };
61 const TensorShape& outputShape = { 2, 2, 2, 2 };
62
63 // Builds up the structure of the network
64 INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
65 operation, qScale, qOffset);
66
67 CHECK(net);
68
69 const std::vector<float> input1({ 1, -1, 1, 1, 5, -5, 5, 5, -3, 3, 3, 3, 4, 4, -4, 4 });
70
71 const std::vector<float> input2({ 2 });
72 std::vector<float> expectedOutput;
73 switch (operation) {
74 case armnn::BinaryOperation::Add:
75 expectedOutput = { 3, 1, 3, 3, 7, -3, 7, 7, -1, 5, 5, 5, 6, 6, -2, 6 };
76 break;
77 case armnn::BinaryOperation::Div:
78 expectedOutput = {0.5f, -0.5f, 0.5f, 0.5f, 2.5f, -2.5f, 2.5f, 2.5f, -1.5f, 1.5f, 1.5f, 1.5f, 2, 2, -2, 2};
79 break;
80 case armnn::BinaryOperation::Maximum:
81 expectedOutput = { 2, 2, 2, 2, 5, 2, 5, 5, 2, 3, 3, 3, 4, 4, 2, 4 };
82 break;
83 case armnn::BinaryOperation::Minimum:
84 expectedOutput = { 1, -1, 1, 1, 2, -5, 2, 2, -3, 2, 2, 2, 2, 2, -4, 2 };
85 break;
86 case armnn::BinaryOperation::Mul:
87 expectedOutput = { 2, -2, 2, 2, 10, -10, 10, 10, -6, 6, 6, 6, 8, 8, -8, 8 };
88 break;
89 case armnn::BinaryOperation::Sub:
90 expectedOutput = { -1, -3, -1, -1, 3, -7, 3, 3, -5, 1, 1, 1, 2, 2, -6, 2 };
91 break;
John Mcloughlin0ec00872023-05-15 17:03:49 +010092 case armnn::BinaryOperation::SqDiff:
93 expectedOutput = { 1, 9, 1, 1, 9, 49, 9, 9, 25, 1, 1, 1, 4, 4, 36, 4 };
94 break;
95 case armnn::BinaryOperation::Power:
96 expectedOutput = { 1, 1, 1, 1, 25, 25, 25, 25, 9, 9, 9, 9, 16, 16, 16, 16 };
97 break;
Mike Kelly3ec30772023-03-08 13:47:17 +000098 default:
99 throw("Invalid Elementwise Binary operation");
100 }
101 const std::vector<float> expectedOutput_const = expectedOutput;
102 // quantize data
103 std::vector<TInput> qInput1Data = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
104 std::vector<TInput> qInput2Data = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
105 std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput_const, qScale, qOffset);
106
107 std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInput1Data }, { 1, qInput2Data }};
108 std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
109
110 EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
111}
112
Tracy Narinee7d27852024-01-26 09:13:19 +0000113
114template<armnn::DataType ArmnnInType,
115 typename TInput = armnn::ResolveType<ArmnnInType>>
116void ElementwiseBinarySimple3DEndToEnd(const std::vector<BackendId>& backends,
117 BinaryOperation operation)
118{
119 using namespace armnn;
120
121 const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
122 const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0;
123
124 const TensorShape& input1Shape = { 2, 2 };
125 const TensorShape& input2Shape = { 2, 2 };
126 const TensorShape& outputShape = { 2, 2 };
127
128 // Builds up the structure of the network
129 INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
130 operation, qScale, qOffset);
131
132 CHECK(net);
133
134 const std::vector<float> input1({ 1, -1, 1, 1 });
135
136 const std::vector<float> input2({ 2, 2, 2, 2 });
137 std::vector<float> expectedOutput;
Teresa Charlin20dda372024-02-08 16:23:25 +0000138 switch (operation)
139 {
Tracy Narinee7d27852024-01-26 09:13:19 +0000140 case armnn::BinaryOperation::Add:
141 expectedOutput = { 3, 1, 3, 3 };
142 break;
143 case armnn::BinaryOperation::Div:
144 expectedOutput = {0.5f, -0.5f, 0.5f, 0.5f };
145 break;
146 case armnn::BinaryOperation::Maximum:
147 expectedOutput = { 2, 2, 2, 2 };
148 break;
149 case armnn::BinaryOperation::Minimum:
150 expectedOutput = { 1, -1, 1, 1 };
151 break;
152 case armnn::BinaryOperation::Mul:
153 expectedOutput = { 2, -2, 2, 2 };
154 break;
155 case armnn::BinaryOperation::Sub:
156 expectedOutput = { -1, -3, -1, -1 };
157 break;
158 case armnn::BinaryOperation::SqDiff:
159 expectedOutput = { 1, 9, 1, 1 };
160 break;
161 case armnn::BinaryOperation::Power:
162 expectedOutput = { 1, 1, 1, 1 };
163 break;
164 default:
165 throw("Invalid Elementwise Binary operation");
166 }
167 const std::vector<float> expectedOutput_const = expectedOutput;
168 // quantize data
169 std::vector<TInput> qInput1Data = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
170 std::vector<TInput> qInput2Data = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
171 std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput_const, qScale, qOffset);
172
173 std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInput1Data }, { 1, qInput2Data }};
174 std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
175
176 EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
177}
178
Tianle Cheng7790dc62023-12-12 13:52:22 +0000179template<armnn::DataType ArmnnInType,
180 typename TInput = armnn::ResolveType<ArmnnInType>>
181void ElementwiseBinarySimpleNoReshapeEndToEnd(const std::vector<BackendId>& backends,
182 BinaryOperation operation)
183{
184 using namespace armnn;
185
186 const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
187 const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0;
188
189 const TensorShape& input1Shape = { 2, 2, 2, 2 };
190 const TensorShape& input2Shape = { 2, 2, 2, 2 };
191 const TensorShape& outputShape = { 2, 2, 2, 2 };
192
193 // Builds up the structure of the network
194 INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
195 operation, qScale, qOffset);
196
197 CHECK(net);
198
199 const std::vector<float> input1({ 1, -1, 1, 1, 5, -5, 5, 5, -3, 3, 3, 3, 4, 4, -4, 4 });
200
201 const std::vector<float> input2({ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 });
202
203 std::vector<float> expectedOutput;
204 switch (operation) {
205 case armnn::BinaryOperation::Add:
206 expectedOutput = { 3, 1, 3, 3, 7, -3, 7, 7, -1, 5, 5, 5, 6, 6, -2, 6 };
207 break;
208 case armnn::BinaryOperation::Div:
209 expectedOutput = {0.5f, -0.5f, 0.5f, 0.5f, 2.5f, -2.5f, 2.5f, 2.5f, -1.5f, 1.5f, 1.5f, 1.5f, 2, 2, -2, 2};
210 break;
211 case armnn::BinaryOperation::Maximum:
212 expectedOutput = { 2, 2, 2, 2, 5, 2, 5, 5, 2, 3, 3, 3, 4, 4, 2, 4 };
213 break;
214 case armnn::BinaryOperation::Minimum:
215 expectedOutput = { 1, -1, 1, 1, 2, -5, 2, 2, -3, 2, 2, 2, 2, 2, -4, 2 };
216 break;
217 case armnn::BinaryOperation::Mul:
218 expectedOutput = { 2, -2, 2, 2, 10, -10, 10, 10, -6, 6, 6, 6, 8, 8, -8, 8 };
219 break;
220 case armnn::BinaryOperation::Sub:
221 expectedOutput = { -1, -3, -1, -1, 3, -7, 3, 3, -5, 1, 1, 1, 2, 2, -6, 2 };
222 break;
223 case armnn::BinaryOperation::SqDiff:
224 expectedOutput = { 1, 9, 1, 1, 9, 49, 9, 9, 25, 1, 1, 1, 4, 4, 36, 4 };
225 break;
226 case armnn::BinaryOperation::Power:
227 expectedOutput = { 1, 1, 1, 1, 25, 25, 25, 25, 9, 9, 9, 9, 16, 16, 16, 16 };
228 break;
229 default:
230 throw("Invalid Elementwise Binary operation");
231 }
232
233 const std::vector<float> expectedOutput_const = expectedOutput;
234 // quantize data
235 std::vector<TInput> qInput1Data = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
236 std::vector<TInput> qInput2Data = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
237 std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput_const, qScale, qOffset);
238
239 std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInput1Data }, { 1, qInput2Data }};
240 std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
241
242 EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
243}
244
Mike Kelly3ec30772023-03-08 13:47:17 +0000245} // anonymous namespace