/*
 * Copyright (c) 2019-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGenerateProposalsLayerKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLArray.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Window.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
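// Validates the kernel inputs: anchors must be a 2D tensor of shape (values_per_roi, num_anchors) in
// QSYMM16, F16 or F32. If all_anchors is already initialized, it must match the anchors' data type
// (and quantization info, when quantized) and have shape (values_per_roi, feat_width * feat_height * num_anchors).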
Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
    if(all_anchors->total_size() > 0)
    {
        size_t feature_height = info.feat_height();
        size_t feature_width  = info.feat_width();
        size_t num_anchors    = anchors->dimension(1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);

        if(is_data_type_quantized(anchors->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors);
        }
    }
    return Status{};
}
} // namespace

CLComputeAllAnchorsKernel::CLComputeAllAnchorsKernel()
    : _anchors(nullptr), _all_anchors(nullptr)
{
}

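// Configures the kernel: initializes the all_anchors output to (values_per_roi, feat_width * feat_height * num_anchors)
// if it is empty, and selects the float or quantized OpenCL kernel variant based on the anchors' data type.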
void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));

    // Metadata
    const size_t   num_anchors = anchors->info()->dimension(1);
    const DataType data_type   = anchors->info()->data_type();
    const float    width       = info.feat_width();
    const float    height      = info.feat_height();

    // Initialize the output if empty
    const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
    auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info()));

    // Set instance variables
    _anchors     = anchors;
    _all_anchors = all_anchors;

    const bool is_quantized = is_data_type_quantized(anchors->info()->data_type());

    // Set build options
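    // The anchor-generation parameters are passed to the OpenCL kernel as compile-time defines;
    // STRIDE (1 / spatial_scale) is the step, in input-image pixels, between two adjacent feature-map cells.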
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
    build_opts.add_option("-DWIDTH=" + float_to_string_with_full_precision(width));
    build_opts.add_option("-DHEIGHT=" + float_to_string_with_full_precision(height));
    build_opts.add_option("-DSTRIDE=" + float_to_string_with_full_precision(1.f / info.spatial_scale()));
    build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors));
    build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi()));

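    // For quantized anchors (QSYMM16), also pass the quantization scale and offset so the
    // quantized kernel variant can map values to and from the quantized domain.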
    if(is_quantized)
    {
        const UniformQuantizationInfo qinfo = anchors->info()->quantization_info().uniform();
        build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
        build_opts.add_option("-DOFFSET=" + float_to_string_with_full_precision(qinfo.offset));
    }

    // Create kernel
    const std::string kernel_name = (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors";
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // The tensor all_anchors can be interpreted as an array of structs (each struct has values_per_roi fields).
    // This means we don't need to pad on the X dimension, as we know in advance how many fields
    // compose the struct.
    Window win = calculate_max_window(*all_anchors->info(), Steps(info.values_per_roi()));
    ICLKernel::configure_internal(win);
}

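// Static validation entry point: runs the same argument checks as configure() without requiring allocated tensors.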
Status CLComputeAllAnchorsKernel::validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(anchors, all_anchors, info));
    return Status{};
}

void CLComputeAllAnchorsKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Collapse everything on the first dimension
    Window collapsed = window.collapse(ICLKernel::window(), Window::DimX);

    // Set arguments
    unsigned int idx = 0;
    add_1D_tensor_argument(idx, _anchors, collapsed);
    add_1D_tensor_argument(idx, _all_anchors, collapsed);

    // Note that we don't need to loop over the slices, as we are launching exactly
    // as many threads as all the anchors generated
    enqueue(queue, *this, collapsed, lws_hint());
}
} // namespace arm_compute