//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertBf16ToFp32TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

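#include <cstdint>
#include <cstring>

// Reference semantics of the layer under test: BFloat16 is the high 16 bits
// of an IEEE-754 binary32 value, so widening BFloat16 to Float32 is exact.
// A minimal illustrative sketch, not part of the ArmNN API; the helper name
// and the <cstdint>/<cstring> includes above are assumptions added here:
inline float ReferenceBf16ToFp32(uint16_t bf16Bits)
{
    uint32_t fp32Bits = static_cast<uint32_t>(bf16Bits) << 16; // zero-fill the low mantissa bits
    float result = 0.0f;
    std::memcpy(&result, &fp32Bits, sizeof(result)); // bit-cast without violating aliasing rules
    return result;
}

// Builds and runs a ConvertBf16ToFp32 workload on the given backend and
// returns the actual and expected Float32 tensors for comparison.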
LayerTestResult<float, 4> ConvertBf16ToFp32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

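    // Input and output share the same 1x3x2x3 shape; only the element type
    // changes from BFloat16 to Float32.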
    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);

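    // With a scale of 1.0f and an offset of 0, QuantizedVector performs no
    // quantization arithmetic and simply converts each literal to BFloat16.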
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
        },
        1.0f, 0);

    auto input = MakeTensor<armnn::BFloat16, 4>(inputTensorInfo, std::vector<armnn::BFloat16>(inputValues));

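    // The expected Float32 output lists the same literals as the input; the
    // Bf16->Fp32 widening itself is exact, so any difference comes only from
    // the initial Fp32->Bf16 rounding of the literals above.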
    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });

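    // Create backend-specific tensor handles through the supplied factory.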
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

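    // Describe the workload: the queue descriptor plus WorkloadInfo bind the
    // tensor handles and their shapes/types to the conversion operation.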
    armnn::ConvertBf16ToFp32QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

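    // Ask the factory for the backend's implementation of the conversion.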
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);

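    // Allocate backing memory for both handles before any data is copied.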
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

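    // Run the conversion workload.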
    workload->Execute();

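    // Read the results back into ret.output; the caller compares them
    // against ret.outputExpected.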
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}