blob: 4e5c1cf5ed7397d27c395c15f475b39b6a394582 [file] [log] [blame]
//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include "NeonBaseWorkload.hpp"
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <memory>
namespace armnn
{
/// Validates that the Neon backend can execute a 2D convolution with the given
/// tensor shapes and descriptor, without constructing the workload.
/// @param input                Info for the input tensor.
/// @param output               Info for the output tensor.
/// @param descriptor           Convolution parameters (stride, padding, dilation, etc.).
/// @param weights              Info for the weights (kernel) tensor.
/// @param biases               Optional info for the bias tensor.
/// @param isFastMathEnabled    Allow faster, possibly less accurate, convolution
///                             algorithms (e.g. Winograd) when true.
/// @param activationDescriptor Optional fused activation; nullptr means none.
/// @return arm_compute::Status reporting whether the configuration is supported.
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
bool isFastMathEnabled = false,
const ActivationDescriptor* activationDescriptor = nullptr);
/// Workload executing a 2D convolution on the Neon (CPU) backend via
/// Arm Compute Library. Owns the ACL function object plus the kernel/bias
/// tensors imported from the queue descriptor.
class NeonConvolution2dWorkload : public NeonBaseWorkload<Convolution2dQueueDescriptor>
{
public:
    using BaseWorkload<Convolution2dQueueDescriptor>::m_Data;

    /// @param descriptor        Queue descriptor carrying inputs, weights and biases.
    /// @param info              Workload info describing tensor shapes.
    /// @param memoryManager     ACL memory manager shared across workloads.
    /// @param isFastMathEnabled Allow faster, possibly less accurate, algorithms.
    // Fixed typo in the declared parameter name (was "isFastMathENabled");
    // parameter names in declarations need not match the definition, so this
    // is source-compatible.
    NeonConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
                              const WorkloadInfo& info,
                              std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
                              const bool isFastMathEnabled = false);

    /// Runs the configured convolution. const because execution mutates only
    /// the mutable members below.
    void Execute() const override;

    /// Reports which ACL algorithm (GEMM, Winograd, direct, ...) was selected
    /// at configure time.
    arm_compute::ConvolutionMethod GetConvolutionMethod() const;

private:
    std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;   // configured ACL function
    mutable std::unique_ptr<arm_compute::Tensor> m_KernelTensor;  // weights; mutable: freed/prepared in const Execute()
    mutable std::unique_ptr<arm_compute::Tensor> m_BiasTensor;    // optional bias; same lifetime handling
    TensorInfo m_KernelTensorInfo;
    TensorInfo m_BiasTensorInfo;
    arm_compute::ConvolutionMethod m_ConvolutionMethod;           // cached selection for GetConvolutionMethod()
    // NOTE(review): name breaks the m_ member convention, but it is referenced
    // by the out-of-line definitions, so renaming requires a matching .cpp change.
    mutable bool prepared = false;                                // one-shot prepare() guard used by Execute()
};
} //namespace armnn