//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include "armnn/INetwork.hpp"
#include "armnnUtils/QuantizeHelper.hpp"
#include "ElementwiseBinaryEndToEndTestImpl.hpp"
#include "Optimizer.hpp"
#include <CommonTestUtils.hpp>
#include <ResolveType.hpp>
#include <doctest/doctest.h>

namespace
{
    using namespace armnn;
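    // Builds a minimal Input -> BroadcastTo -> Output network for the end-to-end test.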
    armnn::INetworkPtr CreateBroadcastToNetwork(BroadcastToDescriptor& descriptor,
                                                const armnn::TensorInfo& inputInfo,
                                                const armnn::TensorInfo& outputInfo)
    {
        INetworkPtr network(INetwork::Create());
        IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
        IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
        IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
        Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
        Connect(broadcastLayer, outputLayer, outputInfo, 0, 0);
        return network;
    }

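    // Builds an Input -> BroadcastTo -> ElementwiseBinary -> Output network; the broadcast result
    // feeds input 0 of the binary layer and a second network input feeds input 1.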
    armnn::INetworkPtr CreateBroadcastToNetworkWithElementWiseBinary(BroadcastToDescriptor& descriptor,
                                                                     const ElementwiseBinaryDescriptor&
                                                                         elementWiseDescriptor,
                                                                     const armnn::TensorInfo& inputInfo,
                                                                     const armnn::TensorInfo& inputInfoElementWise,
                                                                     const armnn::TensorInfo& outputInfo)
    {
        INetworkPtr network(INetwork::Create());
        IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
        IConnectableLayer* inputLayerElementWise = network->AddInputLayer(1, "inputElementWiseBinary");
        IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
        IConnectableLayer* elementWiseLayer = network->AddElementwiseBinaryLayer(elementWiseDescriptor,
                                                                                 "elementWiseBinary");
        IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
        Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
        Connect(inputLayerElementWise, elementWiseLayer, inputInfoElementWise, 0, 1);
        Connect(broadcastLayer, elementWiseLayer, inputInfo, 0, 0);
        Connect(elementWiseLayer, outputLayer, outputInfo, 0, 0);
        return network;
    }

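    // End-to-end test: broadcasts a [1, 4] input to [4, 4] and checks that the row is tiled four times.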
    template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    void BroadcastToEndToEnd(const std::vector<BackendId>& backends)
    {
        float qScale = 1.0f;
        int32_t qOffset = 0;
        bool qConst = true;

        const TensorShape inputTensorShape = { {1, 4} };
        const TensorShape outputTensorShape = { {4, 4} };

        TensorInfo inputInfo(inputTensorShape, ArmnnType, qScale, qOffset, qConst);
        TensorInfo outputInfo(outputTensorShape, ArmnnType, qScale, qOffset);

        std::vector<T> inputData = armnnUtils::QuantizedVector<T>({
            65, 144, 91, 161
        }, qScale, qOffset);

        std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>({
            65, 144, 91, 161,
            65, 144, 91, 161,
            65, 144, 91, 161,
            65, 144, 91, 161
        }, qScale, qOffset);

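        // Broadcast the [1, 4] input to the [4, 4] output shape.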
        auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
        CHECK(descriptor.m_BroadcastToShape == outputTensorShape);
        INetworkPtr network = CreateBroadcastToNetwork(descriptor, inputInfo, outputInfo);

        std::map<int, std::vector<T>> inputTensor = { { 0, inputData } };
        std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };
        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor,
                                                    expectedOutputTensor, backends);
    }

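    // End-to-end test: the broadcast [1, 4] input is combined element-wise with a [4, 4] tensor of ones,
    // so the expected output depends on the binary operation under test.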
    template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    void BroadcastToEndToEndElementWiseBinary(const std::vector<BackendId>& backends,
                                              const ElementwiseBinaryDescriptor& elementWiseDescriptor)
    {
        float qScale = 1.0f;
        int32_t qOffset = 0;
        bool qConst = true;

        const TensorShape inputTensorShape = { {1, 4} };
        const TensorShape outputTensorShape = { {4, 4} };

        const TensorInfo inputInfo(inputTensorShape, ArmnnType, qScale, qOffset, qConst);
        const TensorInfo inputInfoElementWise(outputTensorShape, ArmnnType, qScale, qOffset, qConst);
        const TensorInfo outputInfo(outputTensorShape, ArmnnType, qScale, qOffset);

        std::vector<T> inputData = armnnUtils::QuantizedVector<T>({
            65, 144, 91, 161
        }, qScale, qOffset);

        std::vector<T> inputDataElementWise = armnnUtils::QuantizedVector<T>({
            1, 1, 1, 1,
            1, 1, 1, 1,
            1, 1, 1, 1,
            1, 1, 1, 1
        }, qScale, qOffset);

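        // With an all-ones second operand, Mul and Div leave the broadcast values unchanged,
        // Add adds one to each value and Sub subtracts one.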
        std::vector<T> expectedOutputData;
        if (elementWiseDescriptor.m_Operation == BinaryOperation::Mul ||
            elementWiseDescriptor.m_Operation == BinaryOperation::Div)
        {
            expectedOutputData = armnnUtils::QuantizedVector<T>({
                65, 144, 91, 161,
                65, 144, 91, 161,
                65, 144, 91, 161,
                65, 144, 91, 161
            }, qScale, qOffset);
        }
        else if (elementWiseDescriptor.m_Operation == BinaryOperation::Add)
        {
            expectedOutputData = armnnUtils::QuantizedVector<T>({
                66, 145, 92, 162,
                66, 145, 92, 162,
                66, 145, 92, 162,
                66, 145, 92, 162
            }, qScale, qOffset);
        }
        else if (elementWiseDescriptor.m_Operation == BinaryOperation::Sub)
        {
            expectedOutputData = armnnUtils::QuantizedVector<T>({
                64, 143, 90, 160,
                64, 143, 90, 160,
                64, 143, 90, 160,
                64, 143, 90, 160
            }, qScale, qOffset);
        }

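        // Broadcast the [1, 4] input to the [4, 4] shape consumed by the element-wise binary layer.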
        auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
        CHECK(descriptor.m_BroadcastToShape == outputTensorShape);
        INetworkPtr network = CreateBroadcastToNetworkWithElementWiseBinary(descriptor,
                                                                            elementWiseDescriptor,
                                                                            inputInfo,
                                                                            inputInfoElementWise,
                                                                            outputInfo);
        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());

        // Optimise ArmNN network
        IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::CpuRef }, run->GetDeviceSpec());

        Graph& graph = GetGraphForTesting(optNet.get());

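        // Apply the BroadcastTo optimisation pass to the optimised graph.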
        Optimizer::Pass(graph, armnn::MakeOptimizations(armnn::optimizations::BroadcastToOptimizationLayer()));

        std::map<int, std::vector<T>> inputTensor = { { 0, inputData }, { 1, inputDataElementWise } };
        std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };
        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor,
                                                    expectedOutputTensor, backends);
    }

} // anonymous namespace