//
// Copyright © 2020-2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "EndToEndTestImpl.hpp"

#include <armnn/INetwork.hpp>
#include <armnn/TypesUtils.hpp>

#include <CommonTestUtils.hpp>

#include <ResolveType.hpp>

namespace
{

/** Returns the acceptable tolerance for a given ActivationFunction-DataType combination.
 *
 * @param activationFunction The activation function used
 * @param dataType The data type used
 *
 * @return Tolerance depending on the activation function and data type
 */
float GetActivationTolerance(const armnn::ActivationFunction& activationFunction, DataType dataType)
{
    constexpr float defaultTolerance = 1e-6f;

    switch (activationFunction)
    {
        // The following values are taken from ArmComputeLibrary/tests/validation/CL/ActivationLayer.cpp
        case ActivationFunction::Elu:
            return (dataType == DataType::Float16 ? 0.01f : 0.00001f);
        case ActivationFunction::HardSwish:
            return (dataType == DataType::Float16 ? 0.01f : defaultTolerance);
        default:
            return defaultTolerance;
    }
}

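// For example, a Float16 HardSwish comparison uses the looser 0.01f tolerance, while a
// Float32 ReLu comparison falls back to the 1e-6f default (illustrative of how the helper
// is queried; the values follow directly from the switch above):
//
//     float tolerance  = GetActivationTolerance(ActivationFunction::HardSwish, DataType::Float16); // 0.01f
//     float defaultTol = GetActivationTolerance(ActivationFunction::ReLu, DataType::Float32);      // 1e-6f
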
/** Creates a network with a single layer of the activation function specified in the activation descriptor.
 *
 * @param inputInfo Tensor info of inputs
 * @param outputInfo Tensor info of outputs
 * @param descriptor Activation descriptor
 *
 * @return INetworkPtr A pointer to the created network
 */
armnn::INetworkPtr CreateActivationNetwork(const armnn::TensorInfo& inputInfo,
                                           const armnn::TensorInfo& outputInfo,
                                           const armnn::ActivationDescriptor& descriptor)
{
    using namespace armnn;

    const char* activationName = GetActivationFunctionAsCString(descriptor.m_Function);

    INetworkPtr net(INetwork::Create());

    IConnectableLayer* inputLayer = net->AddInputLayer(0, "input");
    IConnectableLayer* activationLayer = net->AddActivationLayer(descriptor, activationName);
    IConnectableLayer* outputLayer = net->AddOutputLayer(0, "output");

    Connect(inputLayer, activationLayer, inputInfo, 0, 0);
    Connect(activationLayer, outputLayer, outputInfo, 0, 0);

    return net;
}

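// Example usage (illustrative): building a single-layer ReLu network for a 2x2x2x1 Float32 tensor.
//
//     armnn::TensorInfo info({ 2, 2, 2, 1 }, armnn::DataType::Float32);
//     armnn::ActivationDescriptor desc(armnn::ActivationFunction::ReLu);
//     armnn::INetworkPtr net = CreateActivationNetwork(info, info, desc);
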
/** Implements an end-to-end test for a given activation function.
 *
 * - Converts the input data and expected-output data to the data type desired for the test (ArmnnType)
 * - Creates a network with a single layer of the activation function specified in the activation descriptor
 * - Executes the network on the specified backends and compares the results against the expected output values
 *
 * @tparam ArmnnType The armnn data type for the input and expected-output data
 * @param backends Backends to run the test on
 * @param floatInputData Input data given as a vector of float
 * @param floatExpectedOutputData Expected output data given as a vector of float
 * @param inputInfo Tensor info of inputs
 * @param outputInfo Tensor info of outputs
 * @param descriptor Activation descriptor
 */
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
                            const std::vector<float>& floatInputData,
                            const std::vector<float>& floatExpectedOutputData,
                            const armnn::TensorInfo& inputInfo,
                            const armnn::TensorInfo& outputInfo,
                            const armnn::ActivationDescriptor& descriptor)
{
    using namespace armnn;

    // Quantizes the float values to the target data type (pass-through for float types)
    std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData,
                                                              inputInfo.GetQuantizationScale(),
                                                              inputInfo.GetQuantizationOffset());
    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData,
                                                                       outputInfo.GetQuantizationScale(),
                                                                       outputInfo.GetQuantizationOffset());

    INetworkPtr net = CreateActivationNetwork(inputInfo, outputInfo, descriptor);

    std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };

    float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net),
                                                inputTensorData,
                                                expectedOutputTensorData,
                                                backends,
                                                tolerance);
}

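/** Reference implementation used to compute the expected output of an activation function.
 *
 * @param input Input data given as a vector of float
 * @param descriptor Activation descriptor specifying the function and its parameters a and b
 *
 * @return The activation function applied element-wise to the input, as a vector of float
 */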
std::vector<float> Activation(const std::vector<float>& input,
                              const ActivationDescriptor& descriptor)
{
    float a = descriptor.m_A;
    float b = descriptor.m_B;

    std::vector<float> output;
    output.reserve(input.size());

    // Compute the result of the activation function.
    switch (descriptor.m_Function)
    {
        case ActivationFunction::Linear:
        {
            for (auto in : input)
            {
                auto out = a * in + b;
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            for (auto in : input)
            {
                auto out = 1.f / (1.f + expf(-in));
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::ReLu:
        {
            for (auto in : input)
            {
                auto out = std::max(0.f, in);
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
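            // bounded_relu(x) = min(a, max(b, x)); clamps the input to the range [b, a]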
            for (auto in : input)
            {
                auto out = std::min(a, std::max(b, in));
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::SoftReLu:
        {
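            // soft_relu(x) = log(1 + e^x), also known as softplus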
            for (auto in : input)
            {
                auto out = logf(1.0f + expf(in));
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
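            // leaky_relu(x) = x if x > 0, a * x otherwise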
            for (auto in : input)
            {
                auto out = in > 0.0f ? in : (in * a);
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::Abs:
        {
            for (auto in : input)
            {
                auto out = in < 0 ? -in : in;
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::Sqrt:
        {
            for (auto in : input)
            {
                auto out = sqrtf(in);
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::Square:
        {
            for (auto in : input)
            {
                auto out = in * in;
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::TanH:
        {
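            // tanh(x), scaled as a * tanh(b * x)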
            for (auto in : input)
            {
                auto out = a * tanhf(b * in);
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::Elu:
        {
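            // elu(x) = x if x >= 0, a * (e^x - 1) otherwise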
            for (auto in : input)
            {
                auto out = (in >= 0) ? in : a * (expf(in) - 1);
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::HardSwish:
        {
            for (auto in : input)
            {
                // hard_swish(x) = x * relu6(x+3) / 6
                // relu6(x) = min(max(x,0),6)
                auto out = in * (std::min(std::max((in + 3), 0.0f), 6.0f)) / 6;
                output.push_back(out);
            }
            break;
        }
        case ActivationFunction::Gelu:
        {
            for (auto in : input)
            {
                // gelu(x) = x * 1/2 * (1 + erf(x / sqrt(2))),
                // where erf is the Gaussian error function
                auto out = in * (0.5f * (1.0f + erff(static_cast<float>(in / std::sqrt(2)))));
                output.push_back(out);
            }
            break;
        }
        default:
        {
            throw InvalidArgumentException("Unsupported activation function");
        }
    }
    return output;
}

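// For example, with a descriptor constructed as { ActivationFunction::ReLu } (a and b unused),
// the reference evaluates to (values follow directly from the ReLu formula above):
//
//     Activation({ -2.0f, -1.0f, 0.0f, 4.0f }, descriptor); // returns { 0.0f, 0.0f, 0.0f, 4.0f }
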
/** Executes an end-to-end test for an activation layer, generating the expected output data
 *  from the reference Activation() implementation above.
 *
 * @tparam ArmnnType The armnn data type for the input and expected-output data
 * @param backends The backends on which to run the test
 * @param activationFunction The activation function to test
 * @param qScale Quantization scale for the input and output tensors
 * @param qOffset Quantization offset for the input and output tensors
 * @param a Alpha parameter (m_A) of the activation function
 * @param b Beta parameter (m_B) of the activation function
 */
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ActivationEndToEndTest(const std::vector<BackendId>& backends,
                            const ActivationFunction activationFunction,
                            const float qScale = 1.0f,
                            const int32_t qOffset = 0,
                            const float a = 1,
                            const float b = 0)
{
    std::vector<float> floatInputData{ -2.0f, -1.0f, -0.0f, 0.0f,
                                        1.0f,  2.0f,  3.0f, 4.0f };

    ActivationDescriptor descriptor(activationFunction, a, b);

    std::vector<float> floatExpectedOutputData = Activation(floatInputData, descriptor);

    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
    armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);

    ActivationEndToEndImpl<ArmnnType>(backends,
                                      floatInputData,
                                      floatExpectedOutputData,
                                      inputInfo,
                                      outputInfo,
                                      descriptor);
}

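// Typical call from a backend's end-to-end test suite (the backend id here is illustrative;
// pass whichever backends the test should run against):
//
//     ActivationEndToEndTest<armnn::DataType::Float32>({ armnn::Compute::CpuRef },
//                                                      armnn::ActivationFunction::ReLu);
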
} // anonymous namespace