/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Window.h"

namespace arm_compute
{
namespace
{
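// Checks the inputs' metadata before configuration: "anchors" must be an FP16/FP32 tensor of at most
// two dimensions holding values_per_roi values per anchor; if "all_anchors" has already been initialized,
// it must match the anchors' data type and hold one entry per (base anchor, feature-map cell) pair.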
Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
    if(all_anchors->total_size() > 0)
    {
        size_t feature_height = info.feat_height();
        size_t feature_width  = info.feat_width();
        size_t num_anchors    = anchors->dimension(1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);
    }
    return Status{};
}
} // namespace

CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel()
    : _anchors(nullptr), _all_anchors(nullptr)
{
}

void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));

    // Metadata
    const size_t   num_anchors = anchors->info()->dimension(1);
    const DataType data_type   = anchors->info()->data_type();
    const float    width       = info.feat_width();
    const float    height      = info.feat_height();

    // Initialize the output if empty
    const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
    auto_init_if_empty(*all_anchors->info(), output_shape, 1, data_type);

    // Set instance variables
    _anchors     = anchors;
    _all_anchors = all_anchors;

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
    build_opts.add_option("-DWIDTH=" + float_to_string_with_full_precision(width));
    build_opts.add_option("-DHEIGHT=" + float_to_string_with_full_precision(height));
91 build_opts.add_option("-DSTRIDE=" + float_to_string_with_full_precision(1.f / info.spatial_scale()));
92 build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors));
93 build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi()));
94
95 // Create kernel
96 _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("generate_proposals_compute_all_anchors", build_opts.options()));
97
    // The tensor all_anchors can be interpreted as an array of structs (each struct has values_per_roi fields).
    // This means we don't need to pad on the X dimension, as we know in advance how many fields
    // compose the struct.
    Window win = calculate_max_window(*all_anchors->info(), Steps(info.values_per_roi()));
    ICLKernel::configure_internal(win);
}

Status CLComputeAllAnchorsKernel::validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(anchors, all_anchors, info));
    return Status{};
}

void CLComputeAllAnchorsKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Collapse everything on the first dimension
    Window collapsed = window.collapse(ICLKernel::window(), Window::DimX);

    // Set arguments
    unsigned int idx = 0;
    add_1D_tensor_argument(idx, _anchors, collapsed);
    add_1D_tensor_argument(idx, _all_anchors, collapsed);

    // Note that we don't need to loop over the slices, as we launch exactly
    // as many threads as there are anchors to generate
    enqueue(queue, *this, collapsed);
}
} // namespace arm_compute
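
// Usage sketch (illustrative only, not part of this file): how this kernel is typically configured and
// launched through the CL runtime. The tensor shapes and the ComputeAnchorsInfo values below
// (a 38x38 feature map, spatial scale 1/16, 3 base anchors) are assumptions made for the example.
//
//   #include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h"
//   #include "arm_compute/runtime/CL/CLScheduler.h"
//   #include "arm_compute/runtime/CL/CLTensor.h"
//
//   using namespace arm_compute;
//
//   CLScheduler::get().default_init();
//
//   // Base anchors: values_per_roi (4) values for each of the 3 anchors
//   CLTensor anchors;
//   anchors.allocator()->init(TensorInfo(TensorShape(4U, 3U), 1, DataType::F32));
//
//   // Output; configure() auto-initializes it to shape (4, 38 * 38 * 3)
//   CLTensor all_anchors;
//
//   CLComputeAllAnchorsKernel kernel;
//   kernel.configure(&anchors, &all_anchors, ComputeAnchorsInfo(38.f, 38.f, 1.f / 16.f));
//
//   anchors.allocator()->allocate();
//   all_anchors.allocator()->allocate();
//   // ... map "anchors" and fill it with the base anchor boxes ...
//
//   CLScheduler::get().enqueue(kernel);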