//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/backends/Workload.hpp>

#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>

namespace armnn
{
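
// Reports whether the Arm Compute Library Neon backend can execute a normalization layer
// with the given input/output tensor infos and normalization descriptor.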
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const NormalizationDescriptor& descriptor);

class NeonNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
{
public:
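    // The shared memory manager is forwarded to the underlying Arm Compute function,
    // which uses it to allocate working memory on demand.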
    NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
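    // Runs the configured Arm Compute normalization function.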
    virtual void Execute() const override;

    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
    std::unique_ptr<arm_compute::IFunction> m_NormalizationLayer;

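    // Hook used by the Replace*TensorHandle methods to reconfigure the workload
    // when a tensor handle changes.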
    virtual void Reconfigure();
};
} //namespace armnn