/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__
25#define __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__
26
27#include "arm_compute/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h"
28#include "arm_compute/runtime/IFunction.h"
29#include "arm_compute/runtime/IMemoryManager.h"
30#include "arm_compute/runtime/MemoryGroup.h"
31#include "arm_compute/runtime/Tensor.h"
32
33#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"
34
35namespace arm_compute
36{
37/** Assembly kernel glue */
Anthony Barbier71d9b572018-07-06 17:05:59 +010038class NEGEMMAssemblyDispatch : public IFunction
39{
40public:
41 /** Default constructor */
42 NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
43
44 /** Prevent instances of this class from being copy constructed */
Anthony Barbiereaefd002018-07-20 17:49:35 +010045 NEGEMMAssemblyDispatch(const NEGEMMAssemblyDispatch &) = delete;
Anthony Barbier71d9b572018-07-06 17:05:59 +010046 /** Prevent instances of this class from being copied */
Anthony Barbiereaefd002018-07-20 17:49:35 +010047 NEGEMMAssemblyDispatch &operator=(const NEGEMMAssemblyDispatch &) = delete;
48 NEGEMMAssemblyDispatch(NEGEMMAssemblyDispatch &&) = default;
49 NEGEMMAssemblyDispatch &operator=(NEGEMMAssemblyDispatch &&) = default;
50 ~NEGEMMAssemblyDispatch() = default;
51
52 class IFallback
53 {
54 public:
55 virtual void run() = 0;
56 virtual void prepare() = 0;
57 virtual bool is_configured() const = 0;
58 virtual ~IFallback() = default;
59 };
Anthony Barbier71d9b572018-07-06 17:05:59 +010060
61private:
62 /** ACL Function */
63 std::unique_ptr<IFunction> _function;
64
Anthony Barbierc8e84b52018-07-17 16:48:42 +010065 /** If supported create the ACL function corresponding to the GemmMethod provided to process the other passed parameters
66 *
Georgios Pinitas37d080f2019-06-21 18:43:12 +010067 * @param[in] method GemmMethod to use to perform the matrix multiplication.
68 * @param[in] a Input tensor (Matrix A).
69 * @param[in] b Input tensor (Matrix B).
Georgios Pinitascfa2bba2019-06-27 17:00:52 +010070 * @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations
Georgios Pinitas37d080f2019-06-21 18:43:12 +010071 * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
72 * @param[in] alpha Scalar multiplier to apply to AB matrix product.
73 * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
74 * @param[in] gemm_info GEMM meta-data
Anthony Barbierc8e84b52018-07-17 16:48:42 +010075 *
76 * @return True if the method is supported and the function was successfully created, false otherwise.
77 */
Georgios Pinitascfa2bba2019-06-27 17:00:52 +010078 bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
Anthony Barbierc8e84b52018-07-17 16:48:42 +010079
Anthony Barbiereaefd002018-07-20 17:49:35 +010080 /** Interface for the arm_gemm fallback */
Anthony Barbier3d677cc2018-07-23 16:42:59 +010081 std::unique_ptr<IFallback> _arm_gemm;
82 MemoryGroup _memory_group; /**< Function memory group */
83 std::shared_ptr<IMemoryManager> _memory_manager; /**< Copy of the memory manager used to create the memory group to be used when instantiating new functions */
Anthony Barbier71d9b572018-07-06 17:05:59 +010084public:
Anthony Barbierc8e84b52018-07-17 16:48:42 +010085 /** If supported create an ACL function else fallback to the arm_gemm function.
86 *
Georgios Pinitas37d080f2019-06-21 18:43:12 +010087 * @param[in] a Input tensor (Matrix A)
88 * @param[in] b Input tensor (Matrix B)
Georgios Pinitascfa2bba2019-06-27 17:00:52 +010089 * @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations
Georgios Pinitas37d080f2019-06-21 18:43:12 +010090 * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
91 * @param[in] alpha Scalar multiplier to apply to AB matrix product.
92 * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
93 * @param[in] gemm_info GEMM meta-data
Anthony Barbierc8e84b52018-07-17 16:48:42 +010094 */
Georgios Pinitascfa2bba2019-06-27 17:00:52 +010095 void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
Anthony Barbiereaefd002018-07-20 17:49:35 +010096
97 /** Indicates whether or not this function can be used to process the given parameters.
98 *
Georgios Pinitascfa2bba2019-06-27 17:00:52 +010099 * @param[in] a Input tensor info (Matrix A)
100 * @param[in] b Input tensor info (Matrix B)
101 * @param[in] c Input tensor info (Matrix C) used to pass the bias for quantized calculations
Georgios Pinitas37d080f2019-06-21 18:43:12 +0100102 * @param[in] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
103 * @param[in] alpha Scalar multiplier to apply to AB matrix product.
104 * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
105 * @param[in] gemm_info GEMM meta-data
Anthony Barbiereaefd002018-07-20 17:49:35 +0100106 *
107 * @return a status.
108 */
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100109 static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
Anthony Barbierc8e84b52018-07-17 16:48:42 +0100110 /** Was the function successfully configured ?
111 *
112 * @return True if the function is configured and ready to run
113 */
Anthony Barbier71d9b572018-07-06 17:05:59 +0100114 bool is_configured() const;
115 // Inherited methods overridden:
116 /** Runs a preparation step, usually for pre-transposing matrix b */
117 void prepare() override;
118 void run() override;
119};
120
Anthony Barbierc8e84b52018-07-17 16:48:42 +0100121} // namespace arm_compute
Anthony Barbier71d9b572018-07-06 17:05:59 +0100122#endif /* __ARM_COMPUTE_NEGEMMASSEMBLYDISPATCH_H__ */