//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "CastTestImpl.hpp"
#include "ElementwiseUnaryTestImpl.hpp"


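// Shared helper: builds a Cast workload over a fixed 1x3x2x3 tensor, feeds it inputValues
// and compares the produced output against outputValues.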
template<armnn::DataType inputDataType, armnn::DataType outputDataType, typename TInput, typename TOutput>
LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                     const armnn::ITensorHandleFactory& tensorHandleFactory,
                                     const std::vector<TInput>& inputValues,
                                     const std::vector<TOutput>& outputValues)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, inputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, outputDataType);
    float quantizationScale = 1.0f;
    int32_t quantizationOffset = 0;

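    // Quantized tensors use an identity mapping (scale 1.0, offset 0) so the raw values pass through the cast unchanged.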
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo.SetQuantizationScale(quantizationScale);
        inputTensorInfo.SetQuantizationOffset(quantizationOffset);
    }
    if (armnn::IsQuantizedType<TOutput>())
    {
        outputTensorInfo.SetQuantizationScale(quantizationScale);
        outputTensorInfo.SetQuantizationOffset(quantizationOffset);
    }

    auto input = MakeTensor<TInput, 4>(inputTensorInfo, inputValues);

    LayerTestResult<TOutput, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outputValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::CastQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateCast(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> CastInt32ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                 const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<int32_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
                                         3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                        3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::Signed32, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}

LayerTestResult<float, 4> CastInt16ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                 const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<int16_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
                                         3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                        3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::QSymmS16, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}

LayerTestResult<float, 4> CastInt8ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
                                        3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                        3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::QSymmS8, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                        tensorHandleFactory, inputValues,
                                                                        outputValues);
}

LayerTestResult<float, 4> CastInt8AsymmToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                     const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
                                        3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                        3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::QAsymmS8, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues, outputValues);
}

LayerTestResult<float, 4> CastUInt8ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                 const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<uint8_t> inputValues = { 1, 3, 1, 3, 1, 3, 1, 3, 1,
                                         3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<float> outputValues = { 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f,
                                        3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}

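// In the two tests below, negative signed 8-bit inputs are expected to clamp to 0 when cast to QAsymmU8.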
LayerTestResult<uint8_t, 4> CastInt8ToUInt82dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                  const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, -1,
                                        3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<uint8_t> outputValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          3, 1, 3, 1, 2, 1, 3, 1, 3 };
    return CastTest<armnn::DataType::QSymmS8, armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}

LayerTestResult<uint8_t, 4> CastInt8AsymmToUInt82dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                       const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, -1,
                                        3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<uint8_t> outputValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          3, 1, 3, 1, 2, 1, 3, 1, 3 };
    return CastTest<armnn::DataType::QAsymmS8, armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                                          tensorHandleFactory, inputValues,
                                                                          outputValues);
}

LayerTestResult<float, 4> CastFloat16ToFloat322dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                     const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    using namespace half_float::literal;

    std::vector<armnn::Half> inputValues = { -1.10_h, -3._h, -1.30_h, -3._h, -1._h, -3._h, -1._h, -3._h, 1._h,
                                             3.10_h, 1._h, 3.30_h, 1._h, 2._h, 1._h, 3._h, 1._h, 3._h };
    std::vector<float> outputValues = { -1.1f, -3.0f, -1.3f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                        3.1f, 1.0f, 3.3f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::Float16, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                        tensorHandleFactory, inputValues,
                                                                        outputValues);
}

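// armnnUtils::QuantizedVector is used below with scale 1.0 and offset 0 simply to build the BFloat16
// input data from float literals, so the listed values carry over (up to BFloat16 precision).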
LayerTestResult<float, 4> CastBFloat16ToFloat322dTest(armnn::IWorkloadFactory& workloadFactory,
                                                      const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                      const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
        },
        1.0f, 0);

    std::vector<float> outputValues = { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
                                        1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f };

    return CastTest<armnn::DataType::BFloat16, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues, outputValues);
}

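// The expected values below exercise FP16 range limits: 3.4E38f saturates to the FP16 maximum (6.55E4)
// and 0.00000004f collapses to zero.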
LayerTestResult<armnn::Half, 4> CastFloat32ToFloat162dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    using namespace half_float::literal;

    std::vector<float> inputValues = { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f,
                                       0.00000004f, 3.4E38f, 300.0f, 0.5f, 1.3f, 1.5f, 2.1E4f, 8.76f, 15.2f, 37.5f };
    std::vector<armnn::Half> outputValues = { -37.50_h, -15.20_h, -8.76_h, -2._h, -1.50_h, -1.30_h, -0.50_h, -0.40_h,
                                              0._h, 6.55E4_h, 300._h, 0.50_h, 1.30_h, 1.50_h, 2.1E4_h, 8.76_h, 15.20_h, 37.50_h };

    return CastTest<armnn::DataType::Float32, armnn::DataType::Float16>(workloadFactory, memoryManager,
                                                                        tensorHandleFactory, inputValues,
                                                                        outputValues);
}

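// Per the expected values below, fractional float inputs truncate towards zero (e.g. -3.5f -> -3, 3.9f -> 3).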
LayerTestResult<int8_t, 4> CastFloat32ToInt82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<float> inputValues = { -1.0f, -3.5f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                       3.1f, 1.5f, 3.9f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    std::vector<int8_t> outputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
                                         3, 1, 3, 1, 2, 1, 3, 1, 3 };
    return CastTest<armnn::DataType::Float32, armnn::DataType::QAsymmS8>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}

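// As above, fractional inputs truncate towards zero; negative inputs are additionally expected to clamp to 0
// for the unsigned QAsymmU8 output.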
LayerTestResult<uint8_t, 4> CastFloat32ToUInt82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<float> inputValues = { -1.0f, -3.5f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                       3.1f, 1.5f, 3.9f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    std::vector<uint8_t> outputValues = { 0, 0, 0, 0, 0, 0, 0, 0, 1,
                                          3, 1, 3, 1, 2, 1, 3, 1, 3 };
    return CastTest<armnn::DataType::Float32, armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}