blob: 979e36a94ecc2e5de6d6a1c3387a58e9f6162062 [file] [log] [blame]
Aron Virginas-Tare662a942019-10-14 15:12:00 +01001//
Keith Davis69e653f2020-07-02 11:49:26 +01002// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tare662a942019-10-14 15:12:00 +01003// SPDX-License-Identifier: MIT
4//
5
6#include "LogSoftmaxTestImpl.hpp"
7
8#include <Half.hpp>
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01009#include <QuantizeHelper.hpp>
Aron Virginas-Tare662a942019-10-14 15:12:00 +010010#include <ResolveType.hpp>
11
Aron Virginas-Tare662a942019-10-14 15:12:00 +010012
13#include <backendsCommon/CpuTensorHandle.hpp>
Matteo Martincighe5b8eb92019-11-28 15:45:42 +000014#include <armnn/backends/IBackendInternal.hpp>
Aron Virginas-Tare662a942019-10-14 15:12:00 +010015#include <backendsCommon/WorkloadFactory.hpp>
16
Aron Virginas-Tare662a942019-10-14 15:12:00 +010017#include <backendsCommon/test/TensorCopyUtils.hpp>
18#include <backendsCommon/test/WorkloadTestUtils.hpp>
19
20#include <test/TensorHelpers.hpp>
21
22namespace
23{
24
25template<armnn::DataType ArmnnType,
26 std::size_t NumDims,
27 typename T = armnn::ResolveType<ArmnnType>>
28LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
29 armnn::IWorkloadFactory& workloadFactory,
30 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
31 const armnn::TensorInfo& inputInfo,
32 const armnn::TensorInfo& outputInfo,
33 const std::vector<float>& inputValues,
34 const std::vector<float>& expectedOutputValues,
35 armnn::LogSoftmaxQueueDescriptor descriptor,
36 float qScale = 1.0f,
37 int32_t qOffset = 0)
38{
Jan Eilers8eb25602020-03-09 12:13:48 +000039 IgnoreUnused(memoryManager);
Aron Virginas-Tare662a942019-10-14 15:12:00 +010040 LayerTestResult<T, NumDims> result(outputInfo);
41 result.outputExpected =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010042 MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
Aron Virginas-Tare662a942019-10-14 15:12:00 +010043
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +010044 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tare662a942019-10-14 15:12:00 +010045 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
46 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +010047 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tare662a942019-10-14 15:12:00 +010048
49 armnn::WorkloadInfo info;
50
51 AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
52 AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
53
54 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
55
56 inputHandle->Allocate();
57 outputHandle->Allocate();
58
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010059 auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
Aron Virginas-Tare662a942019-10-14 15:12:00 +010060 CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
61
Keith Davis69e653f2020-07-02 11:49:26 +010062 ExecuteWorkload(*workload, memoryManager);
Aron Virginas-Tare662a942019-10-14 15:12:00 +010063
64 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
65
66 return result;
67}
68
69} // anonymous namespace
70
71template<armnn::DataType ArmnnType, typename T>
72LayerTestResult<T, 4> LogSoftmaxTest1(
73 armnn::IWorkloadFactory& workloadFactory,
74 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
75{
76 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
77
78 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
79 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
80
81 std::vector<float> inputValues
82 {
83 0.f, -6.f, 2.f, 4.f,
84 3.f, -2.f, 10.f, 1.f
85 };
86
87 std::vector<float> expectedOutputValues
88 {
89 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
90 -7.00104f, -12.00104f, -0.00105f, -9.00104f
91 };
92
93 armnn::LogSoftmaxQueueDescriptor descriptor;
94 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
95 descriptor.m_Parameters.m_Axis = -1; // default axis
96
97 return LogSoftmaxTestImpl<ArmnnType, 4>(
98 workloadFactory,
99 memoryManager,
100 inputTensorInfo,
101 outputTensorInfo,
102 inputValues,
103 expectedOutputValues,
104 descriptor);
105}
106
107template<armnn::DataType ArmnnType, typename T>
108LayerTestResult<T, 4> LogSoftmaxTest2(
109 armnn::IWorkloadFactory& workloadFactory,
110 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
111{
112 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
113
114 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
115 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
116
117 std::vector<float> inputValues
118 {
119 0.f, -6.f, 2.f, 4.f,
120 3.f, -2.f, 10.f, 1.f
121 };
122
123 std::vector<float> expectedOutputValues
124 {
125 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
126 -7.00104f, -12.00104f, -0.00105f, -9.00104f
127 };
128
129 armnn::LogSoftmaxQueueDescriptor descriptor;
130 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
131 descriptor.m_Parameters.m_Axis = 3; // positive axis
132
133 return LogSoftmaxTestImpl<ArmnnType, 4>(
134 workloadFactory,
135 memoryManager,
136 inputTensorInfo,
137 outputTensorInfo,
138 inputValues,
139 expectedOutputValues,
140 descriptor);
141}
142
143template<armnn::DataType ArmnnType, typename T>
144LayerTestResult<T, 4> LogSoftmaxTest3(
145 armnn::IWorkloadFactory& workloadFactory,
146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
147{
148 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
149
150 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
151 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
152
153 std::vector<float> inputValues
154 {
155 0.0f, -0.6f, 0.2f, 0.4f,
156 0.3f, -0.2f, 1.0f, 0.1f
157 };
158
159 std::vector<float> expectedOutputValues
160 {
161 -4.14297f, -10.14297f, -2.14297f, -0.14297f,
162 -7.00104f, -12.00104f, -0.00105f, -9.00104f
163 };
164
165 armnn::LogSoftmaxQueueDescriptor descriptor;
166 descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
167 descriptor.m_Parameters.m_Axis = 3; // positive axis
168
169 return LogSoftmaxTestImpl<ArmnnType, 4>(
170 workloadFactory,
171 memoryManager,
172 inputTensorInfo,
173 outputTensorInfo,
174 inputValues,
175 expectedOutputValues,
176 descriptor);
177}
178
179template<armnn::DataType ArmnnType, typename T>
180LayerTestResult<T, 4> LogSoftmaxTest4(
181 armnn::IWorkloadFactory& workloadFactory,
182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
183{
184 const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
185
186 armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
187 armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
188
189 std::vector<float> inputValues
190 {
191 0.f, -6.f, 2.f, 4.f,
192 3.f, -2.f, 10.f, 1.f
193 };
194
195 std::vector<float> expectedOutputValues
196 {
197 -3.048587f, -4.018149f, -8.000336f, -0.048587f,
198 -0.048587f, -0.018149f, -0.000335f, -3.048587f
199 };
200
201 armnn::LogSoftmaxQueueDescriptor descriptor;
202 descriptor.m_Parameters.m_Beta = 1.0f; // default beta
203 descriptor.m_Parameters.m_Axis = -2; // negative axis
204
205 return LogSoftmaxTestImpl<ArmnnType, 4>(
206 workloadFactory,
207 memoryManager,
208 inputTensorInfo,
209 outputTensorInfo,
210 inputValues,
211 expectedOutputValues,
212 descriptor);
213}
214
// Explicit template instantiations of the four test cases above for the
// data types exercised by the backend unit tests (Float32 and Float16).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
LogSoftmaxTest4<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest1<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest2<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest3<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
LogSoftmaxTest4<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);