//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "Softmax.hpp"

#include <cmath>
#include <vector>
10
11namespace armnn
12{
13
14/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo
15void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta)
16{
17 unsigned int numChannels = tensorInfo.GetShape()[1];
18 for (unsigned int n = 0; n < tensorInfo.GetShape()[0]; n++)
19 {
20 // find maximum channel
21 float max = in[n * numChannels];
22 for (unsigned int c = 1; c < numChannels; c++)
23 {
24 float val = in[n * numChannels + c];
25 if (val > max)
26 {
27 max = val;
28 }
29 }
30
31 // exponentiate all values and sum
32 std::vector<float> exponentials(numChannels);
33 float sum = 0.0f;
34 for (unsigned int c = 0; c < numChannels; c++)
35 {
36 float val = in[n * numChannels + c];
37 exponentials[c] = expf((val - max) * beta);
38 sum += exponentials[c];
39 }
40
41 // divide exponentials by sum to give outputs
42 for (unsigned int c = 0; c < numChannels; c++)
43 {
44 out[n * numChannels + c] = exponentials[c] / sum;
45 }
46 }
47}
48
49} //namespace armnn