blob: dcef025a3df3e8fdc911502fa5cc551ccf98bfa5 [file] [log] [blame]
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "NeonConvertBf16ToFp32Workload.hpp"
7
8#include <armnnUtils/FloatingPointConverter.hpp>
9
10#include <BFloat16.hpp>
11
12#include <backendsCommon/WorkloadUtils.hpp>
13
14namespace armnn
15{
16
17NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor,
18 const WorkloadInfo& info)
19 : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info)
20{
21 this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1);
22 GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
23}
24
25void NeonConvertBf16ToFp32Workload::Execute() const
26{
Keith Davis2d0679f2021-08-05 11:35:00 +010027 ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertBf16ToFp32Workload_Execute", this->GetGuid());
Narumol Prangnawarat250d3922020-03-30 16:11:04 +010028
29 auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
30 {
31 auto input = reinterpret_cast<const BFloat16*>(src);
32 auto output = reinterpret_cast<float*>(dst);
33 size_t numElements = size/2; // 2 bytes per Bf16
34 armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
35 };
36
37 for (const auto& pair : m_TensorHandlePairs)
38 {
39 CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
40 }
41}
42
43} //namespace armnn