//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SoftmaxTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/QuantizeHelper.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>

namespace
{

struct Simple3dSoftmaxOutputData
{
    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    const std::vector<float> inputData =
    {
        0.0f, 1.0f, 0.0f, 0.0f,
        0.5f, 0.0f, 0.0f, 0.0f
    };
};

struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    const std::vector<float> inputData =
    {
        0.0f, 1.0f, 0.0f, 0.0f,
        0.5f, 0.0f, 0.0f, 0.0f
    };
};

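// Shared implementation used by all of the softmax tests below: it quantises the supplied input
// and expected output according to the tensor info, builds a Softmax workload with the given
// beta and axis, executes it, and returns both the actual and the expected results.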
template<armnn::DataType ArmnnType, std::size_t n, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    const armnn::TensorShape& inputShape,
    const std::vector<float>& outputData,
    const std::vector<float>& inputData,
    int axis = 1)
{
    using std::exp;

    const float qScale = 1.f / 256.f;
    const int qOffset = 0;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    inputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);

    outputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, n> ret(outputTensorInfo);

    // Each row is independently softmax'd.
    auto input = MakeTensor<T, n>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, inputData)));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_Axis = axis;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    BOOST_ASSERT(workload);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());

    std::vector<T> expectedOutput = std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, outputData));
    ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);

    return ret;
}

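// 2D reference case: the expected output is computed analytically. The maximum of each row
// (1.0 for the first row, 0.5 for the second) is subtracted before exponentiation, which does
// not change the softmax result but keeps the intermediate values numerically stable.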
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    using std::exp;
    const armnn::TensorShape inputShape{ 2, 4 };

    float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
                    exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];

    float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
                    exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];

    const std::vector<float> outputData = { x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
                                            x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1 };

    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f
    };

    return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
                                                   inputShape, outputData, inputData);
}

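// Overload that applies softmax along an explicit axis of a 2D tensor. Negative axis values
// count back from the innermost dimension, so -2/0 and -1/1 select the same test data.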
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;

    switch (axis)
    {
        case -2:
        case 0:
        {
            inputShape = { 5, 2 };

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 1:
        {
            inputShape = { 2, 5 };

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
                                                   inputShape, outputData, inputData, axis);
}

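// Rank-3 and rank-4 wrappers that simply forward to the shared base implementation.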
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Simple3dSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    const armnn::TensorShape& inputShape,
    const std::vector<float>& outputData,
    const std::vector<float>& inputData,
    int axis = 1)
{
    return SimpleSoftmaxBaseTestImpl<ArmnnType, 3>(workloadFactory, memoryManager, beta,
                                                   inputShape, outputData, inputData, axis);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Simple4dSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    const armnn::TensorShape& inputShape,
    const std::vector<float>& outputData,
    const std::vector<float>& inputData,
    int axis = 1)
{
    return SimpleSoftmaxBaseTestImpl<ArmnnType, 4>(workloadFactory, memoryManager, beta,
                                                   inputShape, outputData, inputData, axis);
}

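// Feeds the same random input through the factory under test and through a reference workload
// factory, returning both outputs so the caller can check that they match.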
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    const int batchSize = 20;
    const int channels = 30;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channels };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);

    float qScale = 1.f / 256.f;
    int qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);
    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    armnn::SoftmaxQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());

    return ret;
}

} // anonymous namespace

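// Public test entry points: one function per data type and tensor rank, each forwarding to the
// templated implementations in the anonymous namespace above.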
LayerTestResult<float,2> SimpleSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}

LayerTestResult<float,2> SimpleAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
}

LayerTestResult<float,3> Simple3dSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple3dSoftmaxOutputData data;
    return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
                                                             data.inputShape, data.outputData, data.inputData);
}

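// Axis test data: along the selected axis each softmax slice contains either {17, 16, 15, 14, 1}
// or {-1, -2, -3, -4, -17}. The second sequence is the first shifted by -18, so both produce the
// same probabilities, and every case expects the same five values, only laid out differently.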
LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;

    switch (axis)
    {
        case -3:
        case 0:
        {
            inputShape = { 5, 2, 2 };

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f,
                16.0f, -2.0f, 16.0f, -2.0f,
                15.0f, -3.0f, 15.0f, -3.0f,
                14.0f, -4.0f, 14.0f, -4.0f,
                1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -2:
        case 1:
        {
            inputShape = { 2, 5, 2 };

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,

                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 2:
        {
            inputShape = { 2, 2, 5 };

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
                                                             inputShape, outputData, inputData, axis);
}

LayerTestResult<float,4> Simple4dSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple4dSoftmaxData data;
    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
                                                             data.outputData, data.inputData);
}

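// Rank-4 variant of the axis test: the same two five-element sequences are placed along each
// candidate axis in turn.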
LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;

    switch (axis)
    {
        case -4:
        case 0:
        {
            inputShape = { 5, 2, 2, 2 };

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f,
                16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f,
                15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f,
                14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
                1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,

                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,

                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,

                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,

                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -3:
        case 1:
        {
            inputShape = { 2, 5, 2, 2 };

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,

                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -2:
        case 2:
        {
            inputShape = { 2, 2, 5, 2 };

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f,
                0.236882800924671f, 0.236882800924671f,
                0.087144312427294f, 0.087144312427294f,
                0.032058600957022f, 0.032058600957022f,
                7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 3:
        {
            inputShape = { 2, 2, 2, 5 };

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        beta,
        inputShape,
        outputData,
        inputData,
        axis);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}

LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple3dSoftmaxOutputData data;
    return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        beta,
        data.inputShape,
        data.outputData,
        data.inputData);
}

LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple4dSoftmaxData data;

    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, beta, data.inputShape, data.outputData, data.inputData);
}

LayerTestResult<armnn::Half,2> SimpleSoftmaxFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta);
}

LayerTestResult<armnn::Half,3> Simple3dSoftmaxFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple3dSoftmaxOutputData data;
    return Simple3dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
                                                             data.inputShape, data.outputData, data.inputData);
}

LayerTestResult<armnn::Half,4> Simple4dSoftmaxFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple4dSoftmaxData data;
    return Simple4dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
                                                             data.inputShape, data.outputData, data.inputData);
}

LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
}

LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple3dSoftmaxOutputData data;
    return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, beta, data.inputShape, data.outputData, data.inputData);
}

LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    Simple4dSoftmaxData data;

    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, beta, data.inputShape, data.outputData, data.inputData);
}

LayerTestResult<float,2> CompareSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, beta);
}