/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Window.h"

namespace arm_compute
{
namespace
{
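// Checks that anchors has shape (values_per_roi, num_anchors) with an F16/F32 data type and that,
// if already initialized, all_anchors has shape (values_per_roi, feat_width * feat_height * num_anchors)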
Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
    if(all_anchors->total_size() > 0)
    {
        size_t feature_height = info.feat_height();
        size_t feature_width  = info.feat_width();
        size_t num_anchors    = anchors->dimension(1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);
    }
    return Status{};
}
} // namespace

CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel()
    : _anchors(nullptr), _all_anchors(nullptr)
{
}

void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));

    // Metadata
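    // num_anchors is the number of base anchor shapes; width and height are the spatial dimensions of the feature map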
    const size_t   num_anchors = anchors->info()->dimension(1);
    const DataType data_type   = anchors->info()->data_type();
    const float    width       = info.feat_width();
    const float    height      = info.feat_height();

    // Initialize the output if empty
    const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
    auto_init_if_empty(*all_anchors->info(), output_shape, 1, data_type);

    // Set instance variables
    _anchors     = anchors;
    _all_anchors = all_anchors;

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
    build_opts.add_option("-DWIDTH=" + float_to_string_with_full_precision(width));
    build_opts.add_option("-DHEIGHT=" + float_to_string_with_full_precision(height));
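    // STRIDE is the step in the input image between two consecutive feature map locations, i.e. the inverse of the spatial scale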
    build_opts.add_option("-DSTRIDE=" + float_to_string_with_full_precision(1.f / info.spatial_scale()));
    build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors));
    build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi()));

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("generate_proposals_compute_all_anchors", build_opts.options()));

    // The tensor all_anchors can be interpreted as an array of structs (each struct has values_per_roi fields).
    // This means we don't need to pad on the X dimension, as we know in advance how many fields
    // compose the struct.
    Window win = calculate_max_window(*all_anchors->info(), Steps(info.values_per_roi()));
    ICLKernel::configure_internal(win);
}

Status CLComputeAllAnchorsKernel::validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(anchors, all_anchors, info));
    return Status{};
}

void CLComputeAllAnchorsKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Collapse everything on the first dimension
    Window collapsed = window.collapse(ICLKernel::window(), Window::DimX);

    // Set arguments
    unsigned int idx = 0;
    add_1D_tensor_argument(idx, _anchors, collapsed);
    add_1D_tensor_argument(idx, _all_anchors, collapsed);

    // Note that we don't need to loop over the slices, as we are launching exactly
    // as many threads as there are anchors to generate
    enqueue(queue, *this, collapsed);
}
} // namespace arm_compute