/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H
25#define ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H
26
27#include "arm_compute/runtime/IFunction.h"
28#include "arm_compute/runtime/IMemoryManager.h"
29#include "arm_compute/runtime/MemoryGroup.h"
30#include "arm_compute/runtime/NEON/INEOperator.h"
31#include "arm_compute/runtime/Tensor.h"
32#include "src/core/NEON/INEKernel.h"
33
34namespace arm_compute
35{
36// Forward Declarations
37class ITensor;
38struct PoolingLayerInfo;
39
/** Assembly kernel glue
 *
 * IFunction-style front-end for the pooling assembly kernels. The concrete
 * kernel state is hidden behind a pimpl (@ref Impl); the scratch buffer the
 * assembly routine needs is owned here as @ref _workspace and its lifetime is
 * tracked by @ref _memory_group.
 */
class NEPoolingAssemblyDispatch : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] memory_manager (Optional) Memory manager used to acquire the workspace memory. Can be nullptr.
     */
    NEPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEPoolingAssemblyDispatch(const NEPoolingAssemblyDispatch &) = delete;
    /** Default move constructor */
    NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEPoolingAssemblyDispatch &operator=(const NEPoolingAssemblyDispatch &) = delete;
    /** Default move assignment operator */
    NEPoolingAssemblyDispatch &operator=(NEPoolingAssemblyDispatch &&);
    /** Destructor */
    ~NEPoolingAssemblyDispatch();

    /** If supported create an assembly routine, else fallback to Compute Library function.
     *
     * @param[in]  input  Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[out] output Output tensor to store the result of pooling. Data types supported: same as @p input.
     * @param[in]  info   Pooling meta-data
     */
    void configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &info);

    /** Indicates whether or not this function can be used to process the given parameters.
     *
     * @param[in] input  Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in] output Output tensor info to store the result of pooling. Data types supported: same as @p input.
     * @param[in] info   Pooling meta-data
     *
     * @return a status.
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);

    /** Was the function successfully configured ?
     *
     * @return True if the function is configured and ready to run
     */
    bool is_configured() const;

    // Inherited methods overridden:
    void run() override;

private:
    /** Helper function to allocate memory for the workspace needed by the
     * assembly kernels
     *
     * @param[in] workspace_size Total size of the workspace.
     * @param[in] alignment      Alignment requirement in bytes.
     */
    void allocate_workspace(size_t workspace_size, size_t alignment);

    // Pimpl: hides the assembly-kernel details from this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;

    // Tracks lifetime of the workspace tensor below.
    MemoryGroup _memory_group{};
    // Scratch buffer for the assembly kernels, sized via allocate_workspace().
    Tensor _workspace{};
};
99
namespace experimental
{
/** Basic function to run pooling assembly kernels
 *
 * Operator-style (INEOperator) counterpart of arm_compute::NEPoolingAssemblyDispatch:
 * it is configured from tensor infos only, and the actual tensors are supplied
 * per call through the ITensorPack passed to run().
 */
class NEPoolingAssemblyDispatch : public INEOperator
{
public:
    /** Constructor */
    NEPoolingAssemblyDispatch() = default;
    /** Prevent instances of this class from being copied */
    NEPoolingAssemblyDispatch(const NEPoolingAssemblyDispatch &) = delete;
    /** Default move constructor */
    NEPoolingAssemblyDispatch(NEPoolingAssemblyDispatch &&) = default;
    /** Prevent instances of this class from being copied */
    NEPoolingAssemblyDispatch &operator=(const NEPoolingAssemblyDispatch &) = delete;
    /** Default move assignment operator */
    NEPoolingAssemblyDispatch &operator=(NEPoolingAssemblyDispatch &&) = default;
    /** Destructor */
    ~NEPoolingAssemblyDispatch();

    /** If supported create an assembly routine, else fallback to Compute Library function.
     *
     * @param[in]  input  Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[out] output Output tensor info to store the result of pooling. Data types supported: same as @p input.
     * @param[in]  info   Pooling meta-data
     */
    void configure(const ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &info);

    /** Indicates whether or not this function can be used to process the given parameters.
     *
     * @param[in] input  Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in] output Output tensor info to store the result of pooling. Data types supported: same as @p input.
     * @param[in] info   Pooling meta-data
     *
     * @return a status.
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &info);
    /** Was the function successfully configured ?
     *
     * @return True if the function is configured and ready to run
     */
    bool is_configured() const;
    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    // Set during configure(); presumably true when the pooling window spans
    // the whole input (global pooling) — consumed by run(). Semantics live in
    // the implementation file; confirm there before relying on this flag.
    bool _is_global_pooling_layer{ false };
};
} // namespace experimental
148} // namespace arm_compute
149#endif /* ARM_COMPUTE_NEPOOLINGASSEMBLYDISPATCH_H */