/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "CL/CLAccessor.h"
#include "CL/Helper.h"
#include "Globals.h"
#include "PaddingCalculator.h"
#include "TensorLibrary.h"
#include "TypePrinter.h"
#include "Utils.h"
#include "validation/Datasets.h"
#include "validation/Helpers.h"
#include "validation/Reference.h"
#include "validation/Validation.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

#include "boost_wrapper.h"

#include <random>
#include <string>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::cl;
using namespace arm_compute::test::validation;

namespace
{
/** Define the tolerance of the activation layer.
 *
 * @param[in] activation           The activation function used.
 * @param[in] fixed_point_position Number of bits for the fractional part.
 *
 * @return Tolerance depending on the activation function.
 */
float activation_layer_tolerance(ActivationLayerInfo::ActivationFunction activation, int fixed_point_position = 0)
{
    switch(activation)
    {
        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
        case ActivationLayerInfo::ActivationFunction::SQRT:
        case ActivationLayerInfo::ActivationFunction::TANH:
            return (fixed_point_position != 0) ? 5.f : 0.00001f;
        default:
            return 0.f;
    }
}
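
/* For example, activation_layer_tolerance(ActivationLayerInfo::ActivationFunction::TANH)
 * returns 0.00001f for floating point data but a much looser 5.f once a fixed point
 * position is set, while piecewise-linear functions such as ABS or RELU fall through to
 * the default case and are expected to match the reference exactly (tolerance 0.f).
 */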

/** Compute the CL activation layer function.
 *
 * @param[in] in_place             Compute the activation layer in-place.
 * @param[in] shape                Shape of the input and output tensors.
 * @param[in] dt                   Data type of the tensors.
 * @param[in] act_info             Activation layer information.
 * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of fixed point numbers.
 *
 * @return Computed output tensor.
 */
CLTensor compute_activation_layer(bool in_place, const TensorShape &shape, DataType dt, ActivationLayerInfo act_info, int fixed_point_position = 0)
{
    // Create tensors
    CLTensor src = create_tensor(shape, dt, 1, fixed_point_position);
    CLTensor dst = create_tensor(shape, dt, 1, fixed_point_position);

    // Create and configure function
    CLActivationLayer act_layer;

    if(in_place)
    {
        act_layer.configure(&src, nullptr, act_info);
    }
    else
    {
        act_layer.configure(&src, &dst, act_info);
    }

    // Allocate tensors
    src.allocator()->allocate();
    BOOST_TEST(!src.info()->is_resizable());

    if(!in_place)
    {
        dst.allocator()->allocate();
        BOOST_TEST(!dst.info()->is_resizable());
    }

    // Fill tensors
    if(dt == DataType::F32)
    {
        float min_bound = 0;
        float max_bound = 0;
        std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<float>(act_info.activation());
        std::uniform_real_distribution<> distribution(min_bound, max_bound);
        library->fill(CLAccessor(src), distribution, 0);
    }
    else
    {
        int min_bound = 0;
        int max_bound = 0;
        std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
        std::uniform_int_distribution<> distribution(min_bound, max_bound);
        library->fill(CLAccessor(src), distribution, 0);
    }

    // Compute function
    act_layer.run();

    if(in_place)
    {
        return src;
    }
    else
    {
        return dst;
    }
}
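
/* A minimal sketch of how the helper above is combined with the reference implementation in
 * the test cases below (illustrative only; shape and activation function chosen arbitrarily):
 *
 *     ActivationLayerInfo info(ActivationLayerInfo::ActivationFunction::ABS);
 *     CLTensor  dst     = compute_activation_layer(true, TensorShape(27U, 13U), DataType::F32, info);
 *     RawTensor ref_dst = Reference::compute_reference_activation_layer(TensorShape(27U, 13U), DataType::F32, info);
 *     validate(CLAccessor(dst), ref_dst, activation_layer_tolerance(info.activation()));
 */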
} // namespace

#ifndef DOXYGEN_SKIP_THIS
BOOST_AUTO_TEST_SUITE(CL)
BOOST_AUTO_TEST_SUITE(ActivationLayer)

BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
BOOST_DATA_TEST_CASE(Configuration, boost::unit_test::data::make({ false, true }) * (SmallShapes() + LargeShapes()) * CNNFloatDataTypes(), in_place, shape, dt)
{
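    // Note: in Boost.Test dataset syntax '*' forms the cartesian product of datasets and
    // '+' concatenates them, so this case runs for every combination of in-place flag,
    // small/large shape and CNN float data type.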
    // Set the fixed point position if the data type allows it
    const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;

    // Create tensors
    CLTensor src = create_tensor(shape, dt, 1, fixed_point_position);
    CLTensor dst = create_tensor(shape, dt, 1, fixed_point_position);

    BOOST_TEST(src.info()->is_resizable());
    BOOST_TEST(dst.info()->is_resizable());

    // Create and configure function
    CLActivationLayer act_layer;

    if(in_place)
    {
        act_layer.configure(&src, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS));
    }
    else
    {
        act_layer.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS));
    }

    // Validate valid region
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(src.info()->valid_region(), valid_region);

    if(!in_place)
    {
        validate(dst.info()->valid_region(), valid_region);
    }

    // Validate padding
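    // The step of 16 passed to PaddingCalculator below is assumed to match the number of
    // elements the CL activation kernel processes per iteration; the calculator derives the
    // padding needed to round the x dimension up to a multiple of that step.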
    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
    validate(src.info()->padding(), padding);

    if(!in_place)
    {
        validate(dst.info()->padding(), padding);
    }
}

BOOST_AUTO_TEST_SUITE(Float)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * CNNFloatDataTypes() * ActivationFunctions(), in_place, shape, dt, act_function)
{
    // Create activation layer info
    ActivationLayerInfo act_info(act_function, 1.f, 1.f);
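    // The two trailing arguments are the a and b coefficients of ActivationLayerInfo; they only
    // influence parameterised functions (e.g. LINEAR, TANH, BOUNDED_RELU) and are fixed to 1.f here.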

    // Compute function
    CLTensor dst = compute_activation_layer(in_place, shape, dt, act_info);

    // Compute reference
    RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, dt, act_info);

    // Validate output
    validate(CLAccessor(dst), ref_dst, activation_layer_tolerance(act_function));
}

BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
BOOST_DATA_TEST_CASE(RunLarge, boost::unit_test::data::make({ false, true }) * LargeShapes() * CNNFloatDataTypes() * ActivationFunctions(), in_place, shape, dt, act_function)
{
    // Create activation layer info
    ActivationLayerInfo act_info(act_function, 1.f, 1.f);

    // Compute function
    CLTensor dst = compute_activation_layer(in_place, shape, dt, act_info);

    // Compute reference
    RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, dt, act_info);

    // Validate output
    validate(CLAccessor(dst), ref_dst, activation_layer_tolerance(act_function));
}
BOOST_AUTO_TEST_SUITE_END()

BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
#endif