Narumol Prangnawarat | 250d392 | 2020-03-30 16:11:04 +0100 | [diff] [blame] | 1 | // |
Teresa Charlin | 588cbdf | 2022-01-19 15:55:37 +0000 | [diff] [blame] | 2 | // Copyright © 2020 Arm Ltd and Contributors. All rights reserved. |
Narumol Prangnawarat | 250d392 | 2020-03-30 16:11:04 +0100 | [diff] [blame] | 3 | // SPDX-License-Identifier: MIT |
| 4 | // |
| 5 | |
| 6 | #include "NeonConvertFp32ToBf16Workload.hpp" |
| 7 | |
| 8 | #include <BFloat16.hpp> |
| 9 | #include <Profiling.hpp> |
| 10 | |
| 11 | #include <armnnUtils/FloatingPointConverter.hpp> |
| 12 | |
| 13 | #include <backendsCommon/WorkloadUtils.hpp> |
| 14 | |
| 15 | namespace armnn |
| 16 | { |
| 17 | |
| 18 | NeonConvertFp32ToBf16Workload::NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, |
| 19 | const WorkloadInfo& info) |
| 20 | : Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>(descriptor, info) |
| 21 | { |
| 22 | this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToBf16Workload", 1, 1); |
| 23 | GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); |
| 24 | } |
| 25 | |
| 26 | void NeonConvertFp32ToBf16Workload::Execute() const |
| 27 | { |
Keith Davis | 2d0679f | 2021-08-05 11:35:00 +0100 | [diff] [blame] | 28 | ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp32ToBf16Workload_Execute", this->GetGuid()); |
Narumol Prangnawarat | 250d392 | 2020-03-30 16:11:04 +0100 | [diff] [blame] | 29 | |
| 30 | auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size) |
| 31 | { |
| 32 | auto input = reinterpret_cast<const float*>(src); |
| 33 | auto output = reinterpret_cast<BFloat16*>(dst); |
| 34 | size_t numElements = size/2; // 2 bytes per bf16 |
| 35 | armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output); |
| 36 | }; |
| 37 | |
| 38 | for (const auto& pair : m_TensorHandlePairs) |
| 39 | { |
| 40 | CopyTensorContentsGeneric(pair.first, pair.second, convertFunc); |
| 41 | } |
| 42 | } |
| 43 | |
David Monahan | ec81999 | 2022-02-10 14:47:13 +0000 | [diff] [blame] | 44 | void NeonConvertFp32ToBf16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) |
| 45 | { |
| 46 | ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; |
| 47 | this->m_Data.m_Inputs[slot] = tensorHandle; |
| 48 | try |
| 49 | { |
| 50 | Reconfigure(); |
| 51 | } |
| 52 | catch(armnn::UnimplementedException& e) |
| 53 | { |
| 54 | // Cannot reconfigure, revert the slot back and throw the exception. |
| 55 | this->m_Data.m_Inputs[slot] = backupHandle; |
| 56 | throw e; |
| 57 | } |
| 58 | } |
| 59 | |
| 60 | // Replace output tensor handle with the given TensorHandle |
| 61 | void NeonConvertFp32ToBf16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) |
| 62 | { |
| 63 | ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; |
| 64 | this->m_Data.m_Inputs[slot] = tensorHandle; |
| 65 | try |
| 66 | { |
| 67 | Reconfigure(); |
| 68 | } |
| 69 | catch(armnn::UnimplementedException& e) |
| 70 | { |
| 71 | // Cannot reconfigure, revert the slot back and throw the exception. |
| 72 | this->m_Data.m_Inputs[slot] = backupHandle; |
| 73 | throw e; |
| 74 | } |
| 75 | } |
| 76 | |
// Reconfigure the workload after a tensor handle has been swapped in.
// Not supported for this workload: always throws UnimplementedException,
// which Replace{Input,Output}TensorHandle catch in order to revert the slot.
void NeonConvertFp32ToBf16Workload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}
| 81 | |
Narumol Prangnawarat | 250d392 | 2020-03-30 16:11:04 +0100 | [diff] [blame] | 82 | } //namespace armnn |