/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "NEON/Helper.h"
#include "NEON/NEAccessor.h"
#include "TypePrinter.h"
#include "validation/Datasets.h"
#include "validation/Reference.h"
#include "validation/Validation.h"

#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"

#include <random>

using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::neon;
using namespace arm_compute::test::validation;

namespace
{
/** Define the tolerance of the normalization layer depending on the data type of the tensor values.
 *
 * @param[in] dt Data type of the tensors' values.
 *
 * @return Tolerance depending on the data type.
 */
float normalization_layer_tolerance(DataType dt)
{
    switch(dt)
    {
        case DataType::QS8:
            return 2.0f;
        case DataType::F32:
            return 1e-05f;
        default:
            return 0.f;
    }
}

/** Compute Neon normalization layer function.
 *
 * @param[in] shape                Shape of the input and output tensors.
 * @param[in] dt                   Data type of input and output tensors.
 * @param[in] norm_info            Normalization Layer information.
 * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0).
 *
 * @return Computed output tensor.
 */
Tensor compute_normalization_layer(const TensorShape &shape, DataType dt, NormalizationLayerInfo norm_info, int fixed_point_position = 0)
{
    // Create tensors
    Tensor src = create_tensor(shape, dt, 1, fixed_point_position);
    Tensor dst = create_tensor(shape, dt, 1, fixed_point_position);

    // Create and configure function
    NENormalizationLayer norm;
    norm.configure(&src, &dst, norm_info);

    // Allocate tensors
    src.allocator()->allocate();
    dst.allocator()->allocate();

    BOOST_TEST(!src.info()->is_resizable());
    BOOST_TEST(!dst.info()->is_resizable());

    // Fill tensors (for QS8, restrict values to [-1, 1] in fixed-point representation)
    if(dt == DataType::QS8)
    {
        const int8_t one_fixed_point       = 1 << fixed_point_position;
        const int8_t minus_one_fixed_point = -one_fixed_point;
        library->fill_tensor_uniform(NEAccessor(src), 0, minus_one_fixed_point, one_fixed_point);
    }
    else
    {
        library->fill_tensor_uniform(NEAccessor(src), 0);
    }

    // Compute function
    norm.run();

    return dst;
}
} // namespace

#ifndef DOXYGEN_SKIP_THIS
BOOST_AUTO_TEST_SUITE(NEON)
BOOST_AUTO_TEST_SUITE(NormalizationLayer)

BOOST_AUTO_TEST_SUITE(Float)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
BOOST_DATA_TEST_CASE(RunSmall,
                     SmallShapes() * DataType::F32 * NormalizationTypes() * boost::unit_test::data::xrange(3, 9, 2) * boost::unit_test::data::make({ 0.5f, 1.0f, 2.0f }),
                     shape, dt, norm_type, norm_size, beta)
{
    // Provide normalization layer information
    NormalizationLayerInfo norm_info(norm_type, norm_size, 5, beta);

    // Compute function
    Tensor dst = compute_normalization_layer(shape, dt, norm_info);

    // Compute reference
    RawTensor ref_dst = Reference::compute_reference_normalization_layer(shape, dt, norm_info);

    // Validate output
    validate(NEAccessor(dst), ref_dst, normalization_layer_tolerance(DataType::F32));
}
BOOST_AUTO_TEST_SUITE_END()

BOOST_AUTO_TEST_SUITE(Quantized)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
BOOST_DATA_TEST_CASE(RunSmall,
                     SmallShapes() * DataType::QS8 * NormalizationTypes() * boost::unit_test::data::xrange(3, 7, 2) * (boost::unit_test::data::xrange(1, 6) * boost::unit_test::data::make({ 0.5f, 1.0f, 2.0f })),
                     shape, dt, norm_type, norm_size, fixed_point_position, beta)
{
    // Provide normalization layer information
    NormalizationLayerInfo norm_info(norm_type, norm_size, 5, beta, 1.f);

    // Compute function
    Tensor dst = compute_normalization_layer(shape, dt, norm_info, fixed_point_position);

    // Compute reference
    RawTensor ref_dst = Reference::compute_reference_normalization_layer(shape, dt, norm_info, fixed_point_position);

    // Validate output
    validate(NEAccessor(dst), ref_dst, normalization_layer_tolerance(DataType::QS8));
}
BOOST_AUTO_TEST_SUITE_END()

BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
#endif