telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 1 | // |
| 2 | // Copyright © 2017 Arm Ltd. All rights reserved. |
David Beck | ecb56cd | 2018-09-05 12:52:57 +0100 | [diff] [blame] | 3 | // SPDX-License-Identifier: MIT |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 4 | // |
| 5 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 6 | #include "NeonBatchNormalizationWorkload.hpp" |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 7 | |
| 8 | #include "NeonWorkloadUtils.hpp" |
| 9 | |
Aron Virginas-Tar | c9cc804 | 2018-11-01 16:15:57 +0000 | [diff] [blame] | 10 | #include <aclCommon/ArmComputeTensorUtils.hpp> |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 11 | #include <aclCommon/ArmComputeUtils.hpp> |
| 12 | |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 13 | #include <armnn/utility/PolymorphicDowncast.hpp> |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 14 | |
James Conroy | 1f58f03 | 2021-04-27 17:13:27 +0100 | [diff] [blame] | 15 | #include <backendsCommon/TensorHandle.hpp> |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 16 | |
| 17 | #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h> |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 18 | |
| 19 | namespace armnn |
| 20 | { |
| 21 | using namespace armcomputetensorutils; |
| 22 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 23 | |
| 24 | arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input, |
| 25 | const TensorInfo& output, |
| 26 | const TensorInfo& mean, |
| 27 | const TensorInfo& var, |
| 28 | const TensorInfo& beta, |
| 29 | const TensorInfo& gamma, |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 30 | const BatchNormalizationDescriptor& descriptor, |
| 31 | const ActivationDescriptor* activationDescriptor) |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 32 | { |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 33 | const arm_compute::TensorInfo aclInputInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 34 | armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 35 | const arm_compute::TensorInfo aclOutputInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 36 | armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 37 | const arm_compute::TensorInfo aclMeanInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 38 | armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 39 | const arm_compute::TensorInfo aclVarInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 40 | armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 41 | const arm_compute::TensorInfo aclBetaInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 42 | armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 43 | const arm_compute::TensorInfo aclGammaInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 44 | armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 45 | |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 46 | const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo( |
| 47 | activationDescriptor); |
| 48 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 49 | return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo, |
| 50 | &aclOutputInfo, |
| 51 | &aclMeanInfo, |
| 52 | &aclVarInfo, |
| 53 | &aclBetaInfo, |
| 54 | &aclGammaInfo, |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 55 | descriptor.m_Eps, |
| 56 | activationInfo); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 57 | } |
| 58 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 59 | NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload( |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 60 | const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 61 | : BaseWorkload<BatchNormalizationQueueDescriptor>(descriptor, info) |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 62 | { |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 63 | m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 64 | |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 65 | arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); |
| 66 | arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 67 | |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 68 | arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 69 | input.info()->set_data_layout(aclDataLayout); |
| 70 | output.info()->set_data_layout(aclDataLayout); |
| 71 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 72 | m_Mean = std::make_unique<arm_compute::Tensor>(); |
| 73 | BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo()); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 74 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 75 | m_Variance = std::make_unique<arm_compute::Tensor>(); |
| 76 | BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo()); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 77 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 78 | m_Gamma = std::make_unique<arm_compute::Tensor>(); |
| 79 | BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo()); |
| 80 | |
| 81 | m_Beta = std::make_unique<arm_compute::Tensor>(); |
| 82 | BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo()); |
| 83 | |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 84 | const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor); |
| 85 | |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 86 | auto layer = std::make_unique<arm_compute::NEBatchNormalizationLayer>(); |
| 87 | layer->configure(&input, |
| 88 | &output, |
| 89 | m_Mean.get(), |
| 90 | m_Variance.get(), |
| 91 | m_Beta.get(), |
| 92 | m_Gamma.get(), |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 93 | m_Data.m_Parameters.m_Eps, |
| 94 | activationInfo); |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 95 | m_Layer.reset(layer.release()); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 96 | |
Nattapat Chaimanowong | 177d8d2 | 2018-10-16 13:21:27 +0100 | [diff] [blame] | 97 | InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean); |
| 98 | InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance); |
| 99 | InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma); |
| 100 | InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 101 | |
| 102 | // Force Compute Library to perform the necessary copying and reshaping, after which |
| 103 | // delete all the input tensors that will no longer be needed |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 104 | m_Layer->prepare(); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 105 | FreeUnusedTensors(); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 106 | } |
| 107 | |
/// Runs the configured ACL batch-normalization layer on the bound
/// input/output tensors, under a NEON profiling scope.
void NeonBatchNormalizationWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationWorkload_Execute");
    m_Layer->run();
}
| 113 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 114 | void NeonBatchNormalizationWorkload::FreeUnusedTensors() |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 115 | { |
| 116 | FreeTensorIfUnused(m_Mean); |
| 117 | FreeTensorIfUnused(m_Variance); |
| 118 | FreeTensorIfUnused(m_Gamma); |
| 119 | FreeTensorIfUnused(m_Beta); |
| 120 | } |
| 121 | |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 122 | } //namespace armnn |