/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "SoftmaxLayer.h"

#include "arm_compute/core/Types.h"
#include "tests/validation/FixedPoint.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
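/** Reference implementation of SoftmaxLayer (floating-point).
 *
 * The softmax is computed independently for each row along the first
 * dimension (dimension 0) of the input tensor. The row maximum is
 * subtracted before exponentiation for numerical stability:
 *
 *   dst_i = exp(beta * (src_i - max)) / sum_j(exp(beta * (src_j - max)))
 */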
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
{
    // Create reference
    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };

    // Compute reference
    const int cols       = src.shape()[0];
    const int upper_dims = src.num_elements() / cols;

    for(int r = 0; r < upper_dims; ++r)
    {
        const T *src_row_ptr = src.data() + r * cols;
        T       *dst_row_ptr = dst.data() + r * cols;

        // Find max
        const T max = *std::max_element(src_row_ptr, src_row_ptr + cols);

        // Regularize
        T sum(0.f);
        std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&sum, max, beta](T val)
        {
            const T res(std::exp((val - max) * beta));
            sum += res;
            return res;
        });

        // Normalize
        std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [sum](T val)
        {
            return val / sum;
        });
    }

    return dst;
}

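// Fixed-point variant (qint8_t / qint16_t): beta is not supported here, and the
// per-row sum is accumulated in a promoted fixed-point type to reduce the risk
// of overflow before being converted back for the final division.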
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
{
    ARM_COMPUTE_UNUSED(beta);

    using namespace fixed_point_arithmetic;

    // Create reference
    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };

    // Compute reference
    const int cols       = src.shape()[0];
    const int upper_dims = src.num_elements() / cols;

    for(int r = 0; r < upper_dims; ++r)
    {
        const T *src_row_ptr = src.data() + r * cols;
        T       *dst_row_ptr = dst.data() + r * cols;

        // Find max
        const fixed_point<T> max(*std::max_element(src_row_ptr, src_row_ptr + cols), src.fixed_point_position(), true);

        // Regularize
        using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
        fixed_point<promoted_type> sum(0, src.fixed_point_position(), true);
        std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&](T val)
        {
            const fixed_point<T> res = exp(fixed_point<T>(val, src.fixed_point_position(), true) - max);
            sum = add(sum, fixed_point<promoted_type>(res.raw(), src.fixed_point_position(), true));
            return res.raw();
        });

        // Normalize
        fixed_point<T> saturated_sum(sum);
        std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [&](T val)
        {
            return div(fixed_point<T>(val, src.fixed_point_position(), true), saturated_sum).raw();
        });
    }

    return dst;
}

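// QASYMM8 specialization: dequantize to float, run the floating-point
// reference, then requantize with the fixed output quantization below.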
template <>
SimpleTensor<uint8_t> softmax_layer<uint8_t>(const SimpleTensor<uint8_t> &src, float beta)
{
    // Note: Output quantization info should always have scale = 1/256 and offset = 0
    const QuantizationInfo output_quantization_info = QuantizationInfo(1.f / 256, 0);

    SimpleTensor<float>   src_tmp = convert_from_asymmetric(src);
    SimpleTensor<float>   dst_tmp = softmax_layer<float>(src_tmp, beta);
    SimpleTensor<uint8_t> dst     = convert_to_asymmetric(dst_tmp, output_quantization_info);
    return dst;
}

template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
template SimpleTensor<qint8_t> softmax_layer(const SimpleTensor<qint8_t> &src, float beta);
template SimpleTensor<qint16_t> softmax_layer(const SimpleTensor<qint16_t> &src, float beta);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute