//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "EndToEndTestImpl.hpp"

#include <armnn/INetwork.hpp>
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
#include <ResolveType.hpp>

namespace
{

17/** Defines the acceptable tolerance of ActivationFunction-DataType combinations.
18 *
19 * @param activationFunction The activation function used
20 * @param dataType Data type used
21 *
22 * @return Tolerance depending on the activation function and data type
23 */
24float GetActivationTolerance(const armnn::ActivationFunction& activationFunction, DataType dataType)
25{
26 constexpr float defaultTolerance = 1e-6f;
27
28 switch (activationFunction)
29 {
30 // The following values are taken from ArmComputeLibrary/tests/validation/CL/ActivationLayer.cpp
31 case ActivationFunction::Elu:
32 return (dataType == DataType::Float16 ? 0.01f : 0.00001f);
Jan Eilersa83af7b2020-03-18 15:58:11 +000033 case ActivationFunction::HardSwish:
34 return (dataType == DataType::Float16 ? 0.01f : defaultTolerance);
Jan Eilersbca73e12020-03-11 12:52:46 +000035 default:
36 return defaultTolerance;
37 }
38}
39
40/** Creates a network with one layer of the activation function specified in the activation descriptor.
41 *
42 * @param inputInfo Tensor info of inputs
43 * @param outputInfo Tensor info of outputs
44 * @param descriptor Activation descriptor
45 *
46 * @return INetworkPtr A pointer to the created network
47 */
48armnn::INetworkPtr CreateActivationNetwork(const armnn::TensorInfo& inputInfo,
49 const armnn::TensorInfo& outputInfo,
50 const armnn::ActivationDescriptor& descriptor)
51{
52 using namespace armnn;
53
54 char const* ActivationName = GetActivationFunctionAsCString(descriptor.m_Function);
55
56 INetworkPtr net(INetwork::Create());
57
58 IConnectableLayer* input = net->AddInputLayer(0, "input");
59 IConnectableLayer* prelu = net->AddActivationLayer(descriptor, ActivationName);
60 IConnectableLayer* output = net->AddOutputLayer(0, "output");
61
62 Connect(input, prelu, inputInfo, 0, 0);
63 Connect(prelu, output, outputInfo, 0, 0);
64
65 return net;
66}
67
68/** Specifies the implementation of end to end tests for activation functions.
69 *
70 * - Converts input data and expected-output data to the data type that is desired for the test (ArmnnType)
71 * - Creates a network with one layer of the activation function specified in the activation descriptor.
72 * - Executes the network on specified backends and compares results to expected output values
73 *
74 * @tparam ArmnnType The armnn data type for the input and expected-output data
75 * @param backends Backends to run test on
76 * @param floatInputData Input data given as vector of float
77 * @param floatExpectedOutputData Expected output data given as vector of float
78 * @param inputInfo Tensor info of inputs
79 * @param outputInfo Tensor info of outputs
80 * @param descriptor Activation descriptor
81 */
82template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
83void ActivationEndToEndImpl(const std::vector<armnn::BackendId>& backends,
84 const std::vector<float>& floatInputData,
85 const std::vector<float>& floatExpectedOutputData,
86 const armnn::TensorInfo& inputInfo,
87 const armnn::TensorInfo& outputInfo,
88 const armnn::ActivationDescriptor& descriptor)
89{
90 using namespace armnn;
91
92 // Selectively quantizes/transforms float values to the needed data type
93 std::vector<T> inputData = armnnUtils::QuantizedVector<T>( floatInputData,
94 inputInfo.GetQuantizationScale(),
95 inputInfo.GetQuantizationOffset());
96 std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>( floatExpectedOutputData,
97 outputInfo.GetQuantizationScale(),
98 outputInfo.GetQuantizationOffset());
99
100 INetworkPtr net = CreateActivationNetwork(inputInfo, outputInfo, descriptor);
101
102 std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
103 std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
104
105 float tolerance = GetActivationTolerance(descriptor.m_Function, ArmnnType);
106
107 EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net),
108 inputTensorData,
109 expectedOutputTensorData,
110 backends,
111 tolerance);
112}
113
114/** Executes an end to end test for Elu activation with specific input and expected-output data
115 *
116 * @tparam ArmnnType The armnn data type for the input and expected-output data
117 * @param backends The backends on which to run the test
118 */
119template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
120void EluEndToEndTest(const std::vector<BackendId>& backends)
121{
122 std::vector<float> floatInputData{ -2.0f, -1.0f, -0.0f, 0.0f,
123 1.0f, 2.0f, 3.0f, 4.0f };
124
125 std::vector<float> floatExpectedOutputData{ -0.86466471676f, -0.63212055882f, -0.0f, 0.0f,
Jan Eilersa83af7b2020-03-18 15:58:11 +0000126 1.0f , 2.0f , 3.0f, 4.0f };
Jan Eilersbca73e12020-03-11 12:52:46 +0000127
128 float qScale = 1.0f;
129 int32_t qOffset = 0;
130 armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
131 armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
132
133 armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0);
134
135 ActivationEndToEndImpl<ArmnnType>(backends,
136 floatInputData,
137 floatExpectedOutputData,
138 inputInfo,
139 outputInfo,
140 descriptor);
141}
142
Jan Eilersa83af7b2020-03-18 15:58:11 +0000143/** Executes an end to end test for HardSwish activation with specific input and expected-output data
144 *
145 * @tparam ArmnnType The armnn data type for the input and expected-output data
146 * @param backends The backends on which to run the test
147 */
148template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
149void HardSwishEndToEndTest(const std::vector<BackendId>& backends)
150{
151 std::vector<float> floatInputData{ -2.0f, -1.0f, -0.5f, 0.0f,
152 1.0f, 2.0f, 3.0f, 4.0f };
153
154 std::vector<float> floatExpectedOutputData{ -0.33333333333f, -0.33333333333f, -0.208333f, 0.0f,
155 0.66666666667f, 1.66666666667f, 3.0f , 4.0f };
156
157 float qScale = 1.0f;
158 int32_t qOffset = 0;
159 armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
160 armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
161
162 armnn::ActivationDescriptor descriptor(ActivationFunction::HardSwish, 1.0);
163
164 ActivationEndToEndImpl<ArmnnType>(backends,
165 floatInputData,
166 floatExpectedOutputData,
167 inputInfo,
168 outputInfo,
169 descriptor);
170}

} // anonymous namespace