//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "LogSoftmaxTestImpl.hpp"

#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

23namespace
24{
25
26template<armnn::DataType ArmnnType,
27 std::size_t NumDims,
28 typename T = armnn::ResolveType<ArmnnType>>
29LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
30 armnn::IWorkloadFactory& workloadFactory,
31 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
32 const armnn::TensorInfo& inputInfo,
33 const armnn::TensorInfo& outputInfo,
34 const std::vector<float>& inputValues,
35 const std::vector<float>& expectedOutputValues,
36 armnn::LogSoftmaxQueueDescriptor descriptor,
37 float qScale = 1.0f,
38 int32_t qOffset = 0)
39{
Derek Lambertic374ff02019-12-10 21:57:35 +000040 boost::ignore_unused(memoryManager);
Aron Virginas-Tare662a942019-10-14 15:12:00 +010041 LayerTestResult<T, NumDims> result(outputInfo);
42 result.outputExpected =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010043 MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
Aron Virginas-Tare662a942019-10-14 15:12:00 +010044
45 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
46 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
47
48 armnn::WorkloadInfo info;
49
50 AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
51 AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
52
53 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
54
55 inputHandle->Allocate();
56 outputHandle->Allocate();
57
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010058 auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
Aron Virginas-Tare662a942019-10-14 15:12:00 +010059 CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
60
61 workload->Execute();
62
63 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
64
65 return result;
66}
67
68} // anonymous namespace
69
70template<armnn::DataType ArmnnType, typename T>
71LayerTestResult<T, 4> LogSoftmaxTest1(
72 armnn::IWorkloadFactory& workloadFactory,
73 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
74{
75 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
76
77 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
78 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
79
80 std::vector<float> inputValues
81 {
82 0.f, -6.f, 2.f, 4.f,
83 3.f, -2.f, 10.f, 1.f
84 };
85
86 std::vector<float> expectedOutputValues
87 {
88 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
89 -7.00104f, -12.00104f, -0.00105f, -9.00104f
90 };
91
92 armnn::LogSoftmaxQueueDescriptor descriptor;
93 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
94 descriptor.m_Parameters.m_Axis = -1; // default axis
95
96 return LogSoftmaxTestImpl<ArmnnType, 4>(
97 workloadFactory,
98 memoryManager,
99 inputTensorInfo,
100 outputTensorInfo,
101 inputValues,
102 expectedOutputValues,
103 descriptor);
104}
105
106template<armnn::DataType ArmnnType, typename T>
107LayerTestResult<T, 4> LogSoftmaxTest2(
108 armnn::IWorkloadFactory& workloadFactory,
109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
110{
111 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
112
113 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
114 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
115
116 std::vector<float> inputValues
117 {
118 0.f, -6.f, 2.f, 4.f,
119 3.f, -2.f, 10.f, 1.f
120 };
121
122 std::vector<float> expectedOutputValues
123 {
124 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
125 -7.00104f, -12.00104f, -0.00105f, -9.00104f
126 };
127
128 armnn::LogSoftmaxQueueDescriptor descriptor;
129 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
130 descriptor.m_Parameters.m_Axis = 3; // positive axis
131
132 return LogSoftmaxTestImpl<ArmnnType, 4>(
133 workloadFactory,
134 memoryManager,
135 inputTensorInfo,
136 outputTensorInfo,
137 inputValues,
138 expectedOutputValues,
139 descriptor);
140}
141
142template<armnn::DataType ArmnnType, typename T>
143LayerTestResult<T, 4> LogSoftmaxTest3(
144 armnn::IWorkloadFactory& workloadFactory,
145 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
146{
147 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
148
149 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
150 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
151
152 std::vector<float> inputValues
153 {
154 0.0f, -0.6f, 0.2f, 0.4f,
155 0.3f, -0.2f, 1.0f, 0.1f
156 };
157
158 std::vector<float> expectedOutputValues
159 {
160 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
161 -7.00104f, -12.00104f, -0.00105f, -9.00104f
162 };
163
164 armnn::LogSoftmaxQueueDescriptor descriptor;
165 descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
166 descriptor.m_Parameters.m_Axis = 3; // positive axis
167
168 return LogSoftmaxTestImpl<ArmnnType, 4>(
169 workloadFactory,
170 memoryManager,
171 inputTensorInfo,
172 outputTensorInfo,
173 inputValues,
174 expectedOutputValues,
175 descriptor);
176}
177
178template<armnn::DataType ArmnnType, typename T>
179LayerTestResult<T, 4> LogSoftmaxTest4(
180 armnn::IWorkloadFactory& workloadFactory,
181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
182{
183 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
184
185 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
186 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
187
188 std::vector<float> inputValues
189 {
190 0.f, -6.f, 2.f, 4.f,
191 3.f, -2.f, 10.f, 1.f
192 };
193
194 std::vector<float> expectedOutputValues
195 {
196 -3.048587f, -4.018149f, -8.000336f, -0.048587f,
197 -0.048587f, -0.018149f, -0.000335f, -3.048587f
198 };
199
200 armnn::LogSoftmaxQueueDescriptor descriptor;
201 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
202 descriptor.m_Parameters.m_Axis = -2; // negative axis
203
204 return LogSoftmaxTestImpl<ArmnnType, 4>(
205 workloadFactory,
206 memoryManager,
207 inputTensorInfo,
208 outputTensorInfo,
209 inputValues,
210 expectedOutputValues,
211 descriptor);
212}
213
214template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
215LogSoftmaxTest1<armnn::DataType::Float32>(
216 armnn::IWorkloadFactory& workloadFactory,
217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
218
219template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
220LogSoftmaxTest2<armnn::DataType::Float32>(
221 armnn::IWorkloadFactory& workloadFactory,
222 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
223
224template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
225LogSoftmaxTest3<armnn::DataType::Float32>(
226 armnn::IWorkloadFactory& workloadFactory,
227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
228
229template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
230LogSoftmaxTest4<armnn::DataType::Float32>(
231 armnn::IWorkloadFactory& workloadFactory,
232 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
233
234template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
235LogSoftmaxTest1<armnn::DataType::Float16>(
236 armnn::IWorkloadFactory& workloadFactory,
237 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
238
239template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
240LogSoftmaxTest2<armnn::DataType::Float16>(
241 armnn::IWorkloadFactory& workloadFactory,
242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
243
244template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
245LogSoftmaxTest3<armnn::DataType::Float16>(
246 armnn::IWorkloadFactory& workloadFactory,
247 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
248
249template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
250LogSoftmaxTest4<armnn::DataType::Float16>(
251 armnn::IWorkloadFactory& workloadFactory,
252 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);