//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "LogSoftmaxTestImpl.hpp"
7
8#include <Half.hpp>
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01009#include <QuantizeHelper.hpp>
Aron Virginas-Tare662a942019-10-14 15:12:00 +010010#include <ResolveType.hpp>
11
Aron Virginas-Tare662a942019-10-14 15:12:00 +010012
13#include <backendsCommon/CpuTensorHandle.hpp>
Matteo Martincighe5b8eb92019-11-28 15:45:42 +000014#include <armnn/backends/IBackendInternal.hpp>
Aron Virginas-Tare662a942019-10-14 15:12:00 +010015#include <backendsCommon/WorkloadFactory.hpp>
16
Aron Virginas-Tare662a942019-10-14 15:12:00 +010017#include <backendsCommon/test/TensorCopyUtils.hpp>
18#include <backendsCommon/test/WorkloadTestUtils.hpp>
19
20#include <test/TensorHelpers.hpp>
21
22namespace
23{
24
25template<armnn::DataType ArmnnType,
26 std::size_t NumDims,
27 typename T = armnn::ResolveType<ArmnnType>>
28LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29 armnn::IWorkloadFactory& workloadFactory,
30 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
31 const armnn::TensorInfo& inputInfo,
32 const armnn::TensorInfo& outputInfo,
33 const std::vector<float>& inputValues,
34 const std::vector<float>& expectedOutputValues,
35 armnn::LogSoftmaxQueueDescriptor descriptor,
36 float qScale = 1.0f,
37 int32_t qOffset = 0)
38{
Jan Eilers8eb25602020-03-09 12:13:48 +000039 IgnoreUnused(memoryManager);
Aron Virginas-Tare662a942019-10-14 15:12:00 +010040 LayerTestResult<T, NumDims> result(outputInfo);
41 result.outputExpected =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010042 MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
Aron Virginas-Tare662a942019-10-14 15:12:00 +010043
44 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
45 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
46
47 armnn::WorkloadInfo info;
48
49 AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
50 AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
51
52 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
53
54 inputHandle->Allocate();
55 outputHandle->Allocate();
56
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010057 auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
Aron Virginas-Tare662a942019-10-14 15:12:00 +010058 CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
59
60 workload->Execute();
61
62 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
63
64 return result;
65}
66
67} // anonymous namespace
68
69template<armnn::DataType ArmnnType, typename T>
70LayerTestResult<T, 4> LogSoftmaxTest1(
71 armnn::IWorkloadFactory& workloadFactory,
72 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
73{
74 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
75
76 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
77 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
78
79 std::vector<float> inputValues
80 {
81 0.f, -6.f, 2.f, 4.f,
82 3.f, -2.f, 10.f, 1.f
83 };
84
85 std::vector<float> expectedOutputValues
86 {
87 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
88 -7.00104f, -12.00104f, -0.00105f, -9.00104f
89 };
90
91 armnn::LogSoftmaxQueueDescriptor descriptor;
92 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
93 descriptor.m_Parameters.m_Axis = -1; // default axis
94
95 return LogSoftmaxTestImpl<ArmnnType, 4>(
96 workloadFactory,
97 memoryManager,
98 inputTensorInfo,
99 outputTensorInfo,
100 inputValues,
101 expectedOutputValues,
102 descriptor);
103}
104
105template<armnn::DataType ArmnnType, typename T>
106LayerTestResult<T, 4> LogSoftmaxTest2(
107 armnn::IWorkloadFactory& workloadFactory,
108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
109{
110 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
111
112 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
113 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
114
115 std::vector<float> inputValues
116 {
117 0.f, -6.f, 2.f, 4.f,
118 3.f, -2.f, 10.f, 1.f
119 };
120
121 std::vector<float> expectedOutputValues
122 {
123 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
124 -7.00104f, -12.00104f, -0.00105f, -9.00104f
125 };
126
127 armnn::LogSoftmaxQueueDescriptor descriptor;
128 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
129 descriptor.m_Parameters.m_Axis = 3; // positive axis
130
131 return LogSoftmaxTestImpl<ArmnnType, 4>(
132 workloadFactory,
133 memoryManager,
134 inputTensorInfo,
135 outputTensorInfo,
136 inputValues,
137 expectedOutputValues,
138 descriptor);
139}
140
141template<armnn::DataType ArmnnType, typename T>
142LayerTestResult<T, 4> LogSoftmaxTest3(
143 armnn::IWorkloadFactory& workloadFactory,
144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
145{
146 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
147
148 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
149 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
150
151 std::vector<float> inputValues
152 {
153 0.0f, -0.6f, 0.2f, 0.4f,
154 0.3f, -0.2f, 1.0f, 0.1f
155 };
156
157 std::vector<float> expectedOutputValues
158 {
159 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
160 -7.00104f, -12.00104f, -0.00105f, -9.00104f
161 };
162
163 armnn::LogSoftmaxQueueDescriptor descriptor;
164 descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
165 descriptor.m_Parameters.m_Axis = 3; // positive axis
166
167 return LogSoftmaxTestImpl<ArmnnType, 4>(
168 workloadFactory,
169 memoryManager,
170 inputTensorInfo,
171 outputTensorInfo,
172 inputValues,
173 expectedOutputValues,
174 descriptor);
175}
176
177template<armnn::DataType ArmnnType, typename T>
178LayerTestResult<T, 4> LogSoftmaxTest4(
179 armnn::IWorkloadFactory& workloadFactory,
180 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
181{
182 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
183
184 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
185 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
186
187 std::vector<float> inputValues
188 {
189 0.f, -6.f, 2.f, 4.f,
190 3.f, -2.f, 10.f, 1.f
191 };
192
193 std::vector<float> expectedOutputValues
194 {
195 -3.048587f, -4.018149f, -8.000336f, -0.048587f,
196 -0.048587f, -0.018149f, -0.000335f, -3.048587f
197 };
198
199 armnn::LogSoftmaxQueueDescriptor descriptor;
200 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
201 descriptor.m_Parameters.m_Axis = -2; // negative axis
202
203 return LogSoftmaxTestImpl<ArmnnType, 4>(
204 workloadFactory,
205 memoryManager,
206 inputTensorInfo,
207 outputTensorInfo,
208 inputValues,
209 expectedOutputValues,
210 descriptor);
211}
212
213template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
214LogSoftmaxTest1<armnn::DataType::Float32>(
215 armnn::IWorkloadFactory& workloadFactory,
216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
217
218template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
219LogSoftmaxTest2<armnn::DataType::Float32>(
220 armnn::IWorkloadFactory& workloadFactory,
221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
222
223template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
224LogSoftmaxTest3<armnn::DataType::Float32>(
225 armnn::IWorkloadFactory& workloadFactory,
226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
227
228template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
229LogSoftmaxTest4<armnn::DataType::Float32>(
230 armnn::IWorkloadFactory& workloadFactory,
231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
232
233template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
234LogSoftmaxTest1<armnn::DataType::Float16>(
235 armnn::IWorkloadFactory& workloadFactory,
236 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
237
238template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
239LogSoftmaxTest2<armnn::DataType::Float16>(
240 armnn::IWorkloadFactory& workloadFactory,
241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
242
243template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
244LogSoftmaxTest3<armnn::DataType::Float16>(
245 armnn::IWorkloadFactory& workloadFactory,
246 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
247
248template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
249LogSoftmaxTest4<armnn::DataType::Float16>(
250 armnn::IWorkloadFactory& workloadFactory,
251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);