//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "LayerTestResult.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
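
// Verifies the PReLU workload: each output element equals the input where the input is
// non-negative and alpha * input otherwise, with the { 1, 1, 1, 3 } alpha tensor broadcast
// against the { 1, 2, 2, 3 } input along the channel dimension.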
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PreluTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
    armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(0.25f);
        inputTensorInfo.SetQuantizationOffset(128);
        alphaTensorInfo.SetQuantizationScale(0.25f);
        alphaTensorInfo.SetQuantizationOffset(50);
        outputTensorInfo.SetQuantizationScale(0.5f);
        outputTensorInfo.SetQuantizationOffset(120);
    }
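
    // For quantized types, armnnUtils::QuantizedVector maps each float value v to
    // round(v / scale) + offset (clamped to the type's range); this is where the
    // "Expected quantized values" comments below come from, e.g. input 1.0f -> 1.0f / 0.25f + 128 = 132.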

    std::vector<float> inputData
    {
        // Expected quantized values:
        // 128, 128, 128, 132, 132, 132, 124, 124, 124, 120, 120, 120
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -2.0f, -2.0f, -2.0f
    };
    std::vector<float> alphaData
    {
        // Expected quantized values:
        // 50, 54, 58
        0.0f, 1.0f, 2.0f
    };
    std::vector<float> outputExpectedData =
    {
        // Expected quantized values:
        // 120, 120, 120, 122, 122, 122, 120, 118, 116, 120, 116, 112
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f
    };
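    // Expected outputs follow the PReLU definition f(x) = x for x >= 0 and f(x) = alpha * x
    // otherwise; e.g. the last element pairs input -2.0f with alpha 2.0f, giving -4.0f.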

    std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData,
                                                          inputTensorInfo.GetQuantizationScale(),
                                                          inputTensorInfo.GetQuantizationOffset());

    std::vector<T> alpha = armnnUtils::QuantizedVector<T>(alphaData,
                                                          alphaTensorInfo.GetQuantizationScale(),
                                                          alphaTensorInfo.GetQuantizationOffset());

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData,
                                                                   outputTensorInfo.GetQuantizationScale(),
                                                                   outputTensorInfo.GetQuantizationOffset());

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> alphaHandle  = tensorHandleFactory.CreateTensorHandle(alphaTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

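    // Build the PReLU workload: two inputs (the data tensor and the alpha tensor) and one output.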
    armnn::PreluQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload (descriptor, info, inputTensorInfo,  inputHandle.get());
    AddInputToWorkload (descriptor, info, alphaTensorInfo,  alphaHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePrelu(descriptor, info);

    inputHandle->Allocate();
    alphaHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(alphaHandle.get(), alpha.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}