//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "EndToEndTestImpl.hpp"

#include <armnn/INetwork.hpp>
#include <armnn/TypesUtils.hpp>

#include <CommonTestUtils.hpp>

#include <ResolveType.hpp>

namespace
{

/** Returns the acceptable comparison tolerance for a given ActivationFunction and DataType combination.
 *
 * @param activationFunction The activation function used
 * @param dataType The data type used
 *
 * @return Tolerance depending on the activation function and data type
 */
float GetActivationTolerance(const armnn::ActivationFunction& activationFunction, DataType dataType)
{
    constexpr float defaultTolerance = 1e-6f;

    switch (activationFunction)
    {
        // The following values are taken from ArmComputeLibrary/tests/validation/CL/ActivationLayer.cpp
        case ActivationFunction::Elu:
            return (dataType == DataType::Float16 ? 0.01f : 0.00001f);
        case ActivationFunction::HardSwish:
            return (dataType == DataType::Float16 ? 0.01f : defaultTolerance);
        default:
            return defaultTolerance;
    }
}
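
// For example (illustrative only), comparing HardSwish results computed in Float16 uses the looser
// 0.01f tolerance, while Float32 falls back to the 1e-6f default:
//
//     float fp16Tolerance = GetActivationTolerance(armnn::ActivationFunction::HardSwish, armnn::DataType::Float16); // 0.01f
//     float fp32Tolerance = GetActivationTolerance(armnn::ActivationFunction::HardSwish, armnn::DataType::Float32); // 1e-6f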

/** Creates a network with a single layer of the activation function specified in the activation descriptor.
 *
 * @param inputInfo Tensor info of the input
 * @param outputInfo Tensor info of the output
 * @param descriptor Activation descriptor
 *
 * @return INetworkPtr A pointer to the created network
 */
armnn::INetworkPtr CreateActivationNetwork(const armnn::TensorInfo& inputInfo,
                                           const armnn::TensorInfo& outputInfo,
                                           const armnn::ActivationDescriptor& descriptor)
{
    using namespace armnn;

    const char* activationName = GetActivationFunctionAsCString(descriptor.m_Function);

    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input      = net->AddInputLayer(0, "input");
    IConnectableLayer* activation = net->AddActivationLayer(descriptor, activationName);
    IConnectableLayer* output     = net->AddOutputLayer(0, "output");

    Connect(input, activation, inputInfo, 0, 0);
    Connect(activation, output, outputInfo, 0, 0);

    return net;
}
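
// Usage sketch (illustrative only, not exercised by the tests below): building a single-layer Elu
// network for a Float32 tensor of shape { 2, 2, 2, 1 }, mirroring the TensorInfo and descriptor
// construction used by the tests further down in this file:
//
//     armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, armnn::DataType::Float32, 1.0f, 0, true);
//     armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, armnn::DataType::Float32, 1.0f, 0);
//     armnn::ActivationDescriptor desc(armnn::ActivationFunction::Elu, 1.0f);
//     armnn::INetworkPtr network = CreateActivationNetwork(inputInfo, outputInfo, desc);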

/** Implements an end to end test for the activation function specified in the activation descriptor.
 *
 * - Converts the input data and expected-output data to the data type desired for the test (ArmnnType)
 * - Creates a network with one layer of the activation function specified in the activation descriptor
 * - Executes the network on the specified backends and compares the results to the expected output values
 *
 * @tparam ArmnnType The armnn data type for the input and expected-output data
 * @param backends Backends to run the test on
 * @param floatInputData Input data given as a vector of float
 * @param floatExpectedOutputData Expected output data given as a vector of float
 * @param inputInfo Tensor info of the input
 * @param outputInfo Tensor info of the output
 * @param descriptor Activation descriptor
 */
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
                            const std::vector<float>& floatInputData,
                            const std::vector<float>& floatExpectedOutputData,
                            const armnn::TensorInfo& inputInfo,
                            const armnn::TensorInfo& outputInfo,
                            const armnn::ActivationDescriptor& descriptor)
{
    using namespace armnn;

    // Quantizes/transforms the float values to the data type needed for the test
    std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData,
                                                              inputInfo.GetQuantizationScale(),
                                                              inputInfo.GetQuantizationOffset());
    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData,
                                                                       outputInfo.GetQuantizationScale(),
                                                                       outputInfo.GetQuantizationOffset());

    INetworkPtr net = CreateActivationNetwork(inputInfo, outputInfo, descriptor);

    std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };

    float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net),
                                                inputTensorData,
                                                expectedOutputTensorData,
                                                backends,
                                                tolerance);
}
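
// Usage sketch (illustrative only): the helper also drives quantized runs, with the float data
// quantized according to the scale/offset carried in the TensorInfos. The QAsymmS8 type and the
// 0.1f/0 quantization parameters below are assumptions chosen for illustration, not values taken
// from the existing tests:
//
//     armnn::TensorInfo qInputInfo({ 2, 2, 2, 1 }, armnn::DataType::QAsymmS8, 0.1f, 0, true);
//     armnn::TensorInfo qOutputInfo({ 2, 2, 2, 1 }, armnn::DataType::QAsymmS8, 0.1f, 0);
//     ActivationEndToEndImpl<armnn::DataType::QAsymmS8>(backends, floatInputData, floatExpectedOutputData,
//                                                       qInputInfo, qOutputInfo, descriptor);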

/** Executes an end to end test for Elu activation with specific input and expected-output data.
 *
 * @tparam ArmnnType The armnn data type for the input and expected-output data
 * @param backends The backends on which to run the test
 */
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void EluEndToEndTest(const std::vector<BackendId>& backends)
{
    std::vector<float> floatInputData{ -2.0f, -1.0f, -0.0f, 0.0f,
                                        1.0f,  2.0f,  3.0f, 4.0f };

    std::vector<float> floatExpectedOutputData{ -0.86466471676f, -0.63212055882f, -0.0f, 0.0f,
                                                 1.0f,            2.0f,            3.0f, 4.0f };
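
    // The expected values follow the standard Elu definition with alpha = 1 (matching the descriptor
    // below): f(x) = x for x >= 0 and f(x) = alpha * (exp(x) - 1) for x < 0, so
    // f(-1) = exp(-1) - 1 ≈ -0.63212 and f(-2) = exp(-2) - 1 ≈ -0.86466.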

    float qScale = 1.0f;
    int32_t qOffset = 0;
    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
    armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);

    armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0f);

    ActivationEndToEndImpl<ArmnnType>(backends,
                                      floatInputData,
                                      floatExpectedOutputData,
                                      inputInfo,
                                      outputInfo,
                                      descriptor);
}

/** Executes an end to end test for HardSwish activation with specific input and expected-output data.
 *
 * @tparam ArmnnType The armnn data type for the input and expected-output data
 * @param backends The backends on which to run the test
 */
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void HardSwishEndToEndTest(const std::vector<BackendId>& backends)
{
    std::vector<float> floatInputData{ -2.0f, -1.0f, -0.5f, 0.0f,
                                        1.0f,  2.0f,  3.0f, 4.0f };

    std::vector<float> floatExpectedOutputData{ -0.33333333333f, -0.33333333333f, -0.208333f, 0.0f,
                                                 0.66666666667f,  1.66666666667f,  3.0f,      4.0f };
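
    // The expected values follow the HardSwish definition f(x) = x * ReLU6(x + 3) / 6, so
    // f(-2) = -2 * 1 / 6 ≈ -0.33333, f(-1) = -1 * 2 / 6 ≈ -0.33333, f(-0.5) = -0.5 * 2.5 / 6 ≈ -0.208333,
    // f(1) = 1 * 4 / 6 ≈ 0.66667 and f(2) = 2 * 5 / 6 ≈ 1.66667.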

    float qScale = 1.0f;
    int32_t qOffset = 0;
    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
    armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);

    armnn::ActivationDescriptor descriptor(ActivationFunction::HardSwish, 1.0f);

    ActivationEndToEndImpl<ArmnnType>(backends,
                                      floatInputData,
                                      floatExpectedOutputData,
                                      inputInfo,
                                      outputInfo,
                                      descriptor);
}

} // anonymous namespace