/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NECANNYEDGEKERNEL_H__
#define __ARM_COMPUTE_NECANNYEDGEKERNEL_H__

#include "arm_compute/core/NEON/INEKernel.h"

#include <cstdint>

namespace arm_compute
{
class ITensor;

/** Computes magnitude and quantised phase from input gradients. */
class NEGradientKernel : public INEKernel
{
public:
    /** Default constructor */
    NEGradientKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGradientKernel(const NEGradientKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGradientKernel &operator=(const NEGradientKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEGradientKernel(NEGradientKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEGradientKernel &operator=(NEGradientKernel &&) = default;
    /** Default destructor */
    virtual ~NEGradientKernel() = default;

    /** Initialise the kernel's sources and destinations.
     *
     * @note gx, gy and magnitude must all use the same element data type size (either 16-bit or 32-bit)
     *
     * @param[in]  gx        Source tensor - Gx component. Data type supported: S16/S32.
     * @param[in]  gy        Source tensor - Gy component. Data type supported: same as @p gx.
     * @param[out] magnitude Destination tensor - Magnitude. Data type supported: U16 (if the data type of @p gx is S16) / U32 (if the data type of @p gx is S32).
     * @param[out] phase     Destination tensor - Quantized phase. Data type supported: U8.
     * @param[in]  norm_type Normalization type. If 1, the L1 norm is used; otherwise the L2 norm is used.
     */
    virtual void configure(const ITensor *gx, const ITensor *gy, ITensor *magnitude, ITensor *phase, int32_t norm_type);

    // Inherited methods overridden:
    void run(const Window &window) override;

protected:
    /** Common signature for all the specialised gradient functions
     *
     * @param[in]  gx_ptr        Pointer to the first input tensor.
     * @param[in]  gy_ptr        Pointer to the second input tensor.
     * @param[out] magnitude_ptr Pointer to the first output tensor
     * @param[out] phase_ptr     Pointer to the second output tensor
     */
    using GradientFunction = void(const void *__restrict gx_ptr, const void *__restrict gy_ptr, void *__restrict magnitude_ptr, void *__restrict phase_ptr);

    GradientFunction *_func;      /**< Gradient function to use for the particular tensor types passed to configure() */
    const ITensor    *_gx;        /**< Source tensor - Gx component */
    const ITensor    *_gy;        /**< Source tensor - Gy component */
    ITensor          *_magnitude; /**< Destination tensor - Magnitude */
    ITensor          *_phase;     /**< Destination tensor - Quantized phase */
};
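
/* Illustrative usage sketch (not part of the original header). It shows how the gradient
 * kernel could be configured; the tensor arguments are hypothetical and assumed to be
 * already allocated with matching formats (S16 gx/gy -> U16 magnitude, or S32 gx/gy -> U32
 * magnitude, plus a U8 phase output), e.g. produced by a preceding Sobel stage.
 */
#if 0 /* example only, not compiled */
inline void example_configure_gradient(ITensor &gx, ITensor &gy, ITensor &mag, ITensor &phase)
{
    NEGradientKernel gradient;
    // norm_type = 1 selects the L1 norm; any other value selects the L2 norm.
    gradient.configure(&gx, &gy, &mag, &phase, 1);
    // The configured window can then be executed; in practice the runtime usually splits
    // the window and dispatches it across threads, but a single call is also valid:
    gradient.run(gradient.window());
}
#endif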

#ifdef ARM_COMPUTE_ENABLE_FP16
/** NEON kernel to perform Gradient computation
 */
class NEGradientFP16Kernel : public NEGradientKernel
{
public:
    // Inherited methods overridden:
    void configure(const ITensor *gx, const ITensor *gy, ITensor *magnitude, ITensor *phase, int32_t norm_type) override;
};
#else  /* ARM_COMPUTE_ENABLE_FP16 */
using NEGradientFP16Kernel = NEGradientKernel;
#endif /* ARM_COMPUTE_ENABLE_FP16 */
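
/* Note (sketch, not from the original header): because of the alias above, client code can
 * always name NEGradientFP16Kernel; it refers to the FP16-specialised kernel only when the
 * library is built with ARM_COMPUTE_ENABLE_FP16 and falls back to NEGradientKernel otherwise.
 * The tensor arguments below are hypothetical.
 */
#if 0 /* example only, not compiled */
inline void example_configure_gradient_fp16(ITensor &gx, ITensor &gy, ITensor &mag, ITensor &phase)
{
    NEGradientFP16Kernel gradient;             // plain NEGradientKernel when FP16 support is disabled
    gradient.configure(&gx, &gy, &mag, &phase, 2); // norm_type != 1 -> L2 norm
}
#endif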

/** NEON kernel to perform Non-Maxima suppression for Canny Edge.
 *
 * @note This kernel is meant to be used alongside CannyEdge and performs a non-maxima suppression using the magnitude and phase of the input
 *       to characterize points as possible edges. Thus, at the end, each point will be set to EDGE, NO_EDGE or MAYBE.
 *
 * @note Hysteresis is computed in @ref NEEdgeTraceKernel
 */
class NEEdgeNonMaxSuppressionKernel : public INEKernel
{
public:
    /** Default constructor */
    NEEdgeNonMaxSuppressionKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEEdgeNonMaxSuppressionKernel(const NEEdgeNonMaxSuppressionKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEEdgeNonMaxSuppressionKernel &operator=(const NEEdgeNonMaxSuppressionKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEEdgeNonMaxSuppressionKernel(NEEdgeNonMaxSuppressionKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEEdgeNonMaxSuppressionKernel &operator=(NEEdgeNonMaxSuppressionKernel &&) = default;
    /** Default destructor */
    ~NEEdgeNonMaxSuppressionKernel() = default;

    /** Initialise the kernel's sources, destination and border mode.
     *
     * @param[in]  magnitude        Source tensor - Magnitude. Data type supported: U16/U32.
     * @param[in]  phase            Source tensor - Quantized phase. Data type supported: U8.
     * @param[out] output           Output tensor. Data type supported: U8. It will be filled with 0 for "no edge", 127 for "maybe", 255 for "edge"
     * @param[in]  upper_thr        Upper threshold used for the hysteresis
     * @param[in]  lower_thr        Lower threshold used for the hysteresis
     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
     */
    void configure(const ITensor *magnitude, const ITensor *phase, ITensor *output, int32_t upper_thr, int32_t lower_thr, bool border_undefined);

    // Inherited methods overridden:
    void run(const Window &window) override;
    BorderSize border_size() const override;

private:
    /** Common signature for all the specialised non-maxima suppression functions
     *
     * @param[in]  magnitude_ptr Pointer to the first input tensor.
     * @param[in]  phase_ptr     Pointer to the second input tensor.
     * @param[out] output_ptr    Pointer to the output tensor
     * @param[in]  stride_mag    Stride of the magnitude tensor
     * @param[in]  upper_thr     Upper threshold used for the hysteresis
     * @param[in]  lower_thr     Lower threshold used for the hysteresis
     */
    using EdgeNonMaxSupprFunction = void(const void *__restrict magnitude_ptr, const void *__restrict phase_ptr, void *__restrict output_ptr, const uint32_t stride_mag, const int32_t upper_thr,
                                         const int32_t lower_thr);

    EdgeNonMaxSupprFunction *_func;      /**< Non-Maxima suppression function to use for the particular tensor types passed to configure() */
    const ITensor           *_magnitude; /**< Source tensor - Magnitude */
    const ITensor           *_phase;     /**< Source tensor - Quantized phase */
    ITensor                 *_output;    /**< Destination tensor */
    int32_t                  _lower_thr; /**< Lower threshold used for the hysteresis */
    int32_t                  _upper_thr; /**< Upper threshold used for the hysteresis */
};
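
/* Illustrative configuration sketch for the non-maxima suppression stage (not part of the
 * original header). The tensors and the 50/100 thresholds are hypothetical placeholders;
 * "mag" and "phase" would typically be the outputs of NEGradientKernel, and "nonmax" is a U8
 * tensor that receives the 0 / 127 / 255 (no edge / maybe / edge) classification.
 */
#if 0 /* example only, not compiled */
inline void example_configure_non_max(ITensor &mag, ITensor &phase, ITensor &nonmax)
{
    NEEdgeNonMaxSuppressionKernel suppression;
    suppression.configure(&mag, &phase, &nonmax,
                          100,   // upper_thr: upper threshold used for the hysteresis
                          50,    // lower_thr: lower threshold used for the hysteresis
                          true); // border_undefined: true if the border mode is undefined
}
#endif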

/** NEON kernel to perform Edge tracing */
class NEEdgeTraceKernel : public INEKernel
{
public:
    /** Default constructor */
    NEEdgeTraceKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEEdgeTraceKernel(const NEEdgeTraceKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEEdgeTraceKernel &operator=(const NEEdgeTraceKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEEdgeTraceKernel(NEEdgeTraceKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEEdgeTraceKernel &operator=(NEEdgeTraceKernel &&) = default;
    /** Default destructor */
    ~NEEdgeTraceKernel() = default;

    /** Initialise the kernel's source and destination.
     *
     * @param[in,out] input  Source tensor. Data type supported: U8. Must contain 0 for "no edge", 127 for "maybe", 255 for "edge"
     * @param[in,out] output Destination tensor. Data type supported: U8. Must be initialized to 0 (No edge).
     */
    void configure(ITensor *input, ITensor *output);

    // Inherited methods overridden:
    void run(const Window &window) override;
    BorderSize border_size() const override;
    bool is_parallelisable() const override;

private:
    ITensor *_input;  /**< Source tensor */
    ITensor *_output; /**< Destination tensor */
};
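
/* End-to-end sketch of how the three kernels above fit together in a Canny edge pipeline
 * (illustrative only; in the library these stages are normally wired together and scheduled
 * by the Canny edge function, and the tensor names and thresholds here are hypothetical):
 *   1. NEGradientKernel             : Gx/Gy -> magnitude + quantised phase
 *   2. NEEdgeNonMaxSuppressionKernel: magnitude/phase -> edge / no-edge / maybe map
 *   3. NEEdgeTraceKernel            : hysteresis, resolving "maybe" pixels into edge or no-edge
 */
#if 0 /* example only, not compiled */
inline void example_canny_stages(ITensor &gx, ITensor &gy, ITensor &mag, ITensor &phase,
                                 ITensor &nonmax, ITensor &output)
{
    NEGradientKernel              gradient;
    NEEdgeNonMaxSuppressionKernel suppression;
    NEEdgeTraceKernel             trace;

    gradient.configure(&gx, &gy, &mag, &phase, 1);
    suppression.configure(&mag, &phase, &nonmax, 100, 50, true);
    trace.configure(&nonmax, &output); // output must be zero-initialised ("no edge") beforehand
    // Each kernel is then run in order over its window (normally done by the runtime scheduler).
}
#endif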
}
#endif /* __ARM_COMPUTE_NECANNYEDGEKERNEL_H__ */