//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include "LayerTestResult.hpp"
9
10#include <ResolveType.hpp>
11
12#include <armnn/ArmNN.hpp>
13
14#include <backendsCommon/IBackendInternal.hpp>
15#include <backendsCommon/WorkloadFactory.hpp>
16
17#include <backendsCommon/test/TensorCopyUtils.hpp>
18#include <backendsCommon/test/WorkloadTestUtils.hpp>
19
20#include <test/TensorHelpers.hpp>
21
// Runs a PReLU workload on the given backend and compares the result against
// reference values. PReLU computes, element-wise: f(x) = x if x >= 0, else alpha * x,
// with alpha broadcast per-channel here (alpha shape {1,1,1,3} against input {1,2,2,3}).
//
// @tparam ArmnnType  Data type of all tensors; T is the corresponding C++ type.
// @param workloadFactory  Backend factory used to create tensor handles and the workload.
// @param memoryManager    Backend memory manager (currently unused by this test body).
// @return LayerTestResult holding both the computed output and the expected output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PreluTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
    armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType);

    // For quantized runs, input/alpha/output deliberately use different
    // scale/offset pairs so re-quantization between tensors is exercised.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(0.25f);
        inputTensorInfo.SetQuantizationOffset(128);
        alphaTensorInfo.SetQuantizationScale(0.25f);
        alphaTensorInfo.SetQuantizationOffset(50);
        outputTensorInfo.SetQuantizationScale(0.5f);
        outputTensorInfo.SetQuantizationOffset(120);
    }

    std::vector<float> inputData
    {
        // Expected quantized values (scale 0.25, offset 128):
        // 128, 128, 128, 132, 132, 132, 124, 124, 124, 120, 120, 120
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -2.0f, -2.0f, -2.0f
    };
    std::vector<float> alphaData
    {
        // Expected quantized values (scale 0.25, offset 50):
        // 50, 54, 58
        0.0f, 1.0f, 2.0f
    };
    // Expected output: positive inputs pass through unchanged; negative inputs
    // are scaled by the per-channel alpha {0, 1, 2}.
    std::vector<float> outputExpectedData =
    {
        // Expected quantized values (scale 0.5, offset 120):
        // 120, 120, 120, 122, 122, 122, 120, 118, 116, 120, 116, 112
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f
    };

    // Quantize the float reference data into T using each tensor's own scale/offset
    // (QuantizedVector is a pass-through for non-quantized types).
    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                      inputTensorInfo.GetQuantizationOffset(),
                                                                      inputData));
    auto alpha = MakeTensor<T, 4>(alphaTensorInfo, QuantizedVector<T>(alphaTensorInfo.GetQuantizationScale(),
                                                                      alphaTensorInfo.GetQuantizationOffset(),
                                                                      alphaData));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                outputTensorInfo.GetQuantizationOffset(),
                                                                outputExpectedData));

    std::unique_ptr <armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr <armnn::ITensorHandle> alphaHandle  = workloadFactory.CreateTensorHandle(alphaTensorInfo);
    std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire up the descriptor: PReLU takes two inputs (data, alpha) and one output.
    armnn::PreluQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload (descriptor, info, inputTensorInfo,  inputHandle.get());
    AddInputToWorkload (descriptor, info, alphaTensorInfo,  alphaHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePrelu(descriptor, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle->Allocate();
    alphaHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(alphaHandle.get(), &alpha[0][0][0][0]);

    workload->Execute();

    // Read back the computed output; the caller compares it to result.outputExpected.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}