//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

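// Runs a ConvertFp32ToFp16 workload on a 1x3x2x3 tensor and checks the
// element-wise half-precision results against the expected values.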
LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    using namespace half_float::literal;

    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);

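    // Input data spans negative, fractional and zero values; the expected
    // output is the same data written as half-precision (_h) literals.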
    auto input = MakeTensor<float, 4>(inputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
           1.0f,   0.4f,   0.5f,   1.3f,  1.5f,  2.0f,  8.76f, 15.2f, 37.5f });

    LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
           1.0_h,   0.4_h,   0.5_h,   1.3_h,  1.5_h,  2.0_h,  8.76_h, 15.2_h, 37.5_h });

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

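    // Describe the ConvertFp32ToFp16 workload and bind the input and output handles to it.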
    armnn::ConvertFp32ToFp16QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);

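    // Allocate the tensor handles, upload the input data, execute the workload
    // and copy back the converted half-precision result.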
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}