/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "SoftmaxLayer.h"

#include "arm_compute/core/Types.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
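// Floating-point reference. The unnamed enable_if parameter restricts this overload to
// floating-point element types (is_floating_point<T>).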
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
{
    // Create reference
    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };

    // Compute reference. The lower dims collapse the first `axis` dimensions of the shape
    // (i.e. the flattened length of each softmax row); the upper dims are the remaining
    // batches, each of which is normalized independently.
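    // For example (illustrative only): a 2D input of shape (C, N) with axis = 1 gives
    // lower_dims = C (the length of each softmax row) and upper_dims = N (the number of
    // rows that are normalized independently).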

    int lower_dims = 1;
    for(size_t i = 0; i < axis; i++)
    {
        lower_dims *= src.shape()[i];
    }

    int upper_dims = 1;
    for(size_t i = axis; i < TensorShape::num_max_dimensions; i++)
    {
        upper_dims *= src.shape()[i];
    }

    for(int r = 0; r < upper_dims; ++r)
    {
        const T *src_row_ptr = src.data() + r * lower_dims;
        T       *dst_row_ptr = dst.data() + r * lower_dims;

        // Find max
        const T max = *std::max_element(src_row_ptr, src_row_ptr + lower_dims);
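        // Subtracting the row maximum before exponentiation keeps std::exp() in a
        // numerically safe range; the shift cancels out when the row is normalized below.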

        // Regularize
        T sum(0.f);
        std::transform(src_row_ptr, src_row_ptr + lower_dims, dst_row_ptr, [&sum, max, beta](T val)
        {
            const T res(std::exp((val - max) * beta));
            sum += res;
            return res;
        });

        // Normalize
        std::transform(dst_row_ptr, dst_row_ptr + lower_dims, dst_row_ptr, [sum](T val)
        {
            return val / sum;
        });
    }

    return dst;
}

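// Quantized reference: dequantize the uint8 (asymmetric quantized) input to float, run the
// floating-point reference above, then requantize the result with the fixed output
// quantization info.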
template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type>
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
{
    // Note: Output quantization info should always have scale = 1/256 and offset = 0
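    // (Softmax outputs lie in [0, 1), so a scale of 1/256 with a zero offset lets the full
    // uint8 range cover that interval.)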
    const QuantizationInfo output_quantization_info = QuantizationInfo(1.f / 256, 0);

    SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
    SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, axis);
    SimpleTensor<T>     dst     = convert_to_asymmetric(dst_tmp, output_quantization_info);
    return dst;
}

template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, size_t axis);
template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, size_t axis);
template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, size_t axis);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute