//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertBf16ToFp32TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

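// Exercises the ConvertBf16ToFp32 workload: a 1x3x2x3 BFloat16 tensor is pushed
// through the backend under test and the Float32 output is compared against the
// original values.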
LayerTestResult<float, 4> ConvertBf16ToFp32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);

    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);

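    // Input data; with a scale of 1.0f and an offset of 0, QuantizedVector converts
    // each value to BFloat16 without rescaling it.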
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
        },
        1.0f, 0);

    auto input = MakeTensor<armnn::BFloat16, 4>(inputTensorInfo, std::vector<armnn::BFloat16>(inputValues));

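    // The expected Float32 output holds the same values as the BFloat16 input.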
    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });

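    // CreateTensorHandle is deprecated but still the mechanism used by these tests,
    // so the deprecation warning is suppressed around the two calls.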
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

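    // Describe the workload: one BFloat16 input and one Float32 output.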
    armnn::ConvertBf16ToFp32QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

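    // Ask the backend's workload factory for the ConvertBf16ToFp32 workload.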
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);

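    // Allocate the tensor handles, upload the input, run the conversion and read the
    // result back into ret.output for comparison by the test framework.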
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}