telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 1 | // |
Teresa Charlin | 588cbdf | 2022-01-19 15:55:37 +0000 | [diff] [blame] | 2 | // Copyright © 2017 Arm Ltd and Contributors. All rights reserved. |
David Beck | ecb56cd | 2018-09-05 12:52:57 +0100 | [diff] [blame] | 3 | // SPDX-License-Identifier: MIT |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 4 | // |
| 5 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 6 | #include "NeonBatchNormalizationWorkload.hpp" |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 7 | |
| 8 | #include "NeonWorkloadUtils.hpp" |
| 9 | |
Aron Virginas-Tar | c9cc804 | 2018-11-01 16:15:57 +0000 | [diff] [blame] | 10 | #include <aclCommon/ArmComputeTensorUtils.hpp> |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 11 | #include <aclCommon/ArmComputeUtils.hpp> |
| 12 | |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 13 | #include <armnn/utility/PolymorphicDowncast.hpp> |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 14 | |
Colm Donelan | 0c47974 | 2021-12-10 12:43:54 +0000 | [diff] [blame] | 15 | #include <armnn/backends/TensorHandle.hpp> |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 16 | |
| 17 | #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h> |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 18 | |
| 19 | namespace armnn |
| 20 | { |
| 21 | using namespace armcomputetensorutils; |
| 22 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 23 | |
| 24 | arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input, |
| 25 | const TensorInfo& output, |
| 26 | const TensorInfo& mean, |
| 27 | const TensorInfo& var, |
| 28 | const TensorInfo& beta, |
| 29 | const TensorInfo& gamma, |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 30 | const BatchNormalizationDescriptor& descriptor, |
| 31 | const ActivationDescriptor* activationDescriptor) |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 32 | { |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 33 | const arm_compute::TensorInfo aclInputInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 34 | armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 35 | const arm_compute::TensorInfo aclOutputInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 36 | armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 37 | const arm_compute::TensorInfo aclMeanInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 38 | armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 39 | const arm_compute::TensorInfo aclVarInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 40 | armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 41 | const arm_compute::TensorInfo aclBetaInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 42 | armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 43 | const arm_compute::TensorInfo aclGammaInfo = |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 44 | armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 45 | |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 46 | const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo( |
| 47 | activationDescriptor); |
| 48 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 49 | return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo, |
| 50 | &aclOutputInfo, |
| 51 | &aclMeanInfo, |
| 52 | &aclVarInfo, |
| 53 | &aclBetaInfo, |
| 54 | &aclGammaInfo, |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 55 | descriptor.m_Eps, |
| 56 | activationInfo); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 57 | } |
| 58 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 59 | NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload( |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 60 | const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) |
Teresa Charlin | 588cbdf | 2022-01-19 15:55:37 +0000 | [diff] [blame] | 61 | : NeonBaseWorkload<BatchNormalizationQueueDescriptor>(descriptor, info) |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 62 | { |
Keith Davis | 2d0679f | 2021-08-05 11:35:00 +0100 | [diff] [blame] | 63 | // Report Profiling Details |
| 64 | ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonBatchNormalizationWorkload_Construct", |
| 65 | descriptor.m_Parameters, |
| 66 | info, |
| 67 | this->GetGuid()); |
| 68 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 69 | m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 70 | |
Jan Eilers | bb446e5 | 2020-04-02 13:56:54 +0100 | [diff] [blame] | 71 | arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); |
| 72 | arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 73 | |
Matthew Bentham | 8800c00 | 2018-11-19 13:19:28 +0000 | [diff] [blame] | 74 | arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); |
Nikhil Raj | d134093 | 2018-10-18 14:27:50 +0100 | [diff] [blame] | 75 | input.info()->set_data_layout(aclDataLayout); |
| 76 | output.info()->set_data_layout(aclDataLayout); |
| 77 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 78 | m_Mean = std::make_unique<arm_compute::Tensor>(); |
| 79 | BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo()); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 80 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 81 | m_Variance = std::make_unique<arm_compute::Tensor>(); |
| 82 | BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo()); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 83 | |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 84 | m_Gamma = std::make_unique<arm_compute::Tensor>(); |
| 85 | BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo()); |
| 86 | |
| 87 | m_Beta = std::make_unique<arm_compute::Tensor>(); |
| 88 | BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo()); |
| 89 | |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 90 | const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor); |
| 91 | |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 92 | auto layer = std::make_unique<arm_compute::NEBatchNormalizationLayer>(); |
| 93 | layer->configure(&input, |
| 94 | &output, |
| 95 | m_Mean.get(), |
| 96 | m_Variance.get(), |
| 97 | m_Beta.get(), |
| 98 | m_Gamma.get(), |
Mike Kelly | 07810fc | 2020-11-12 10:58:48 +0000 | [diff] [blame] | 99 | m_Data.m_Parameters.m_Eps, |
| 100 | activationInfo); |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 101 | m_Layer.reset(layer.release()); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 102 | |
Nattapat Chaimanowong | 177d8d2 | 2018-10-16 13:21:27 +0100 | [diff] [blame] | 103 | InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean); |
| 104 | InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance); |
| 105 | InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma); |
| 106 | InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 107 | |
| 108 | // Force Compute Library to perform the necessary copying and reshaping, after which |
| 109 | // delete all the input tensors that will no longer be needed |
Matthew Bentham | d80a712 | 2019-01-08 17:52:37 +0000 | [diff] [blame] | 110 | m_Layer->prepare(); |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 111 | FreeUnusedTensors(); |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 112 | } |
| 113 | |
// Runs the previously configured ACL batch-normalization function.
// The profiling macro scopes a timed event tagged with this workload's GUID.
void NeonBatchNormalizationWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonBatchNormalizationWorkload_Execute", this->GetGuid());
    m_Layer->run();
}
| 119 | |
Matthew Bentham | c48ac8c | 2018-12-12 16:15:59 +0000 | [diff] [blame] | 120 | void NeonBatchNormalizationWorkload::FreeUnusedTensors() |
telsoa01 | c577f2c | 2018-08-31 09:22:23 +0100 | [diff] [blame] | 121 | { |
| 122 | FreeTensorIfUnused(m_Mean); |
| 123 | FreeTensorIfUnused(m_Variance); |
| 124 | FreeTensorIfUnused(m_Gamma); |
| 125 | FreeTensorIfUnused(m_Beta); |
| 126 | } |
| 127 | |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 128 | } //namespace armnn |