//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
| 5 | |
| 6 | #include "NeonConvertBf16ToFp32Workload.hpp" |
| 7 | |
| 8 | #include <armnnUtils/FloatingPointConverter.hpp> |
| 9 | |
| 10 | #include <BFloat16.hpp> |
| 11 | |
| 12 | #include <backendsCommon/WorkloadUtils.hpp> |
| 13 | |
| 14 | namespace armnn |
| 15 | { |
| 16 | |
| 17 | NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, |
| 18 | const WorkloadInfo& info) |
| 19 | : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info) |
| 20 | { |
| 21 | this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1); |
| 22 | GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); |
| 23 | } |
| 24 | |
| 25 | void NeonConvertBf16ToFp32Workload::Execute() const |
| 26 | { |
| 27 | ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvertBf16ToFp32Workload_Execute"); |
| 28 | |
| 29 | auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size) |
| 30 | { |
| 31 | auto input = reinterpret_cast<const BFloat16*>(src); |
| 32 | auto output = reinterpret_cast<float*>(dst); |
| 33 | size_t numElements = size/2; // 2 bytes per Bf16 |
| 34 | armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output); |
| 35 | }; |
| 36 | |
| 37 | for (const auto& pair : m_TensorHandlePairs) |
| 38 | { |
| 39 | CopyTensorContentsGeneric(pair.first, pair.second, convertFunc); |
| 40 | } |
| 41 | } |
| 42 | |
| 43 | } //namespace armnn |