//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConvertBf16ToFp32Workload.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>

#include <BFloat16.hpp>

#include <backendsCommon/WorkloadUtils.hpp>

namespace armnn
{

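// Workload that converts BFloat16 input tensors to Float32 output tensors on the Neon backend.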
NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info)
    : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info)
{
    this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1);
    GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
}

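// Convert every gathered (input, output) tensor handle pair from BFloat16 to Float32.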
void NeonConvertBf16ToFp32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertBf16ToFp32Workload_Execute", this->GetGuid());

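    // Conversion function for CopyTensorContentsGeneric: reinterprets the source buffer
    // as BFloat16 values and writes the converted Float32 values to the destination buffer.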
    auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
        {
            auto input = reinterpret_cast<const BFloat16*>(src);
            auto output = reinterpret_cast<float*>(dst);
            size_t numElements = size/2; // 2 bytes per Bf16
            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
        };

    for (const auto& pair : m_TensorHandlePairs)
    {
        CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
    }
}

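// Replace input tensor handle with the given TensorHandle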
void NeonConvertBf16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void NeonConvertBf16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

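// Reconfigure is not supported by this workload; the Replace*TensorHandle methods above
// catch this exception and restore the original tensor handle.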
void NeonConvertBf16ToFp32Workload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

} //namespace armnn