//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
6#pragma once
7
8#include "LayerTestResult.hpp"
9
10#include <armnn/ArmNN.hpp>
11
12#include <ResolveType.hpp>
13
14#include <armnn/backends/IBackendInternal.hpp>
15#include <backendsCommon/Workload.hpp>
16#include <backendsCommon/WorkloadData.hpp>
17#include <backendsCommon/WorkloadFactory.hpp>
18
19#include <backendsCommon/test/DataTypeUtils.hpp>
20#include <backendsCommon/test/TensorCopyUtils.hpp>
21#include <backendsCommon/test/WorkloadTestUtils.hpp>
22
23#include <test/TensorHelpers.hpp>
24
25#include <memory>
26
// Creates an ElementwiseUnary workload from the given factory.
// Defined out-of-line (per backend) so this header stays backend-agnostic;
// the returned workload executes the unary op described by `descriptor`
// on the tensors wired up in `info`.
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::ElementwiseUnaryQueueDescriptor& descriptor);
31
32template <std::size_t NumDims,
33 armnn::DataType ArmnnType,
34 typename T = armnn::ResolveType<ArmnnType>>
35LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
36 armnn::IWorkloadFactory & workloadFactory,
37 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
38 armnn::UnaryOperation op,
39 const unsigned int shape[NumDims],
40 std::vector<float> values,
41 float quantScale,
42 int quantOffset,
43 const unsigned int outShape[NumDims],
44 std::vector<float> outValues,
45 float outQuantScale,
46 int outQuantOffset)
47{
48 armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType};
49 armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType};
50
51 inputTensorInfo.SetQuantizationScale(quantScale);
52 inputTensorInfo.SetQuantizationOffset(quantOffset);
53
54 outputTensorInfo.SetQuantizationScale(outQuantScale);
55 outputTensorInfo.SetQuantizationOffset(outQuantOffset);
56
57 auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo));
58
59 LayerTestResult<T, NumDims> ret(outputTensorInfo);
60
61 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
62 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
63
64 armnn::ElementwiseUnaryDescriptor desc(op);
65 armnn::ElementwiseUnaryQueueDescriptor qDesc;
66 qDesc.m_Parameters = desc;
67 armnn::WorkloadInfo info;
68 AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
69 AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
70 auto workload = CreateWorkload(workloadFactory, info, qDesc);
71
72 inputHandle->Allocate();
73 outputHandle->Allocate();
74
75 CopyDataToITensorHandle(inputHandle.get(), input.origin());
76
77 workload->PostAllocationConfigure();
78 ExecuteWorkload(*workload, memoryManager);
79
80 CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
81
82 ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues,
83 inputTensorInfo));
84 return ret;
85}
86
87template <std::size_t NumDims,
88 armnn::DataType ArmnnType,
89 typename T = armnn::ResolveType<ArmnnType>>
90LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
91 armnn::IWorkloadFactory & workloadFactory,
92 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
93 armnn::UnaryOperation op,
94 const unsigned int shape[NumDims],
95 std::vector<float> values,
96 const unsigned int outShape[NumDims],
97 std::vector<float> outValues,
98 float quantScale = 1.0f,
99 int quantOffset = 0)
100{
101 return ElementwiseUnaryTestHelper<NumDims, ArmnnType>(
102 workloadFactory,
103 memoryManager,
104 op,
105 shape,
106 values,
107 quantScale,
108 quantOffset,
109 outShape,
110 outValues,
111 quantScale,
112 quantOffset);
113}