/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"

#include <cstddef>
#include <cstdint>

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
    if(output_stage.is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_shifts->dimension(0));
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_multipliers->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row or the number of batches must be set to 1");
            }
        }
    }

    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
    // Checks performed when output is configured
    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != output->data_type());
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(), "per channel quantization info is incorrect");

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias, ITensorInfo *output,
                                                         int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, ITensorInfo *output_multipliers, ITensorInfo *output_shifts)
{
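    // The configured window steps four elements at a time along X, so the horizontal access windows
    // below may require the X dimension of the affected tensors to be padded to a multiple of four.
    // window_changed records whether any such padding had to be requested.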
    constexpr unsigned int num_elems_processed_per_iteration = 4;
    bool                   window_changed                    = false;

    // Auto initialize the output
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(output_stage.output_data_type));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));

    AccessWindowHorizontal mm_result_access(mm_result, 0, num_elems_processed_per_iteration);
    window_changed = window_changed || update_window_and_padding(win, mm_result_access);

    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
    window_changed = window_changed || update_window_and_padding(win, output_access);

    if(a_offset != 0)
    {
        AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration);
        window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
    }
    if(b_offset != 0)
    {
        AccessWindowStatic vector_sum_row_access(vector_sum_row, 0, 0, vector_sum_row->dimension(0), 0); // NOLINT
        window_changed = window_changed || update_window_and_padding(win, vector_sum_row_access);
    }

    if(bias != nullptr)
    {
        AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
        window_changed = window_changed || update_window_and_padding(win, bias_access);
    }

    if(output_multipliers->dimension(0) > 1)
    {
        AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_processed_per_iteration);
        AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration);
        window_changed = window_changed || update_window_and_padding(win, output_multipliers_access, output_shifts_access);
    }

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLGEMMLowpOffsetContributionOutputStageKernel::CLGEMMLowpOffsetContributionOutputStageKernel()
    : _mm_result(nullptr),
      _vector_sum_col(nullptr),
      _vector_sum_row(nullptr),
      _bias(nullptr),
      _output(nullptr),
      _output_multipliers(nullptr),
      _output_shifts(nullptr),
      _is_quantized_per_channel(false)
{
}

void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                              const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
    configure(CLKernelLibrary::get().get_compile_context(), mm_result, vector_sum_col, vector_sum_row, bias, output, k, a_offset, b_offset, output_stage, output_multipliers, output_shifts);
}

void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileContext &compile_context, const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row,
                                                              const ICLTensor *bias, ICLTensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                              const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output, output_multipliers, output_shifts);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                  bias != nullptr ? bias->info() : nullptr,
                                                  output->info(),
                                                  a_offset, b_offset, output_stage,
                                                  output_multipliers->info(), output_shifts->info())); // NOLINT

    const int min = output_stage.gemmlowp_min_bound;
    const int max = output_stage.gemmlowp_max_bound;

    _vector_sum_col           = vector_sum_col;
    _vector_sum_row           = vector_sum_row;
    _mm_result                = mm_result;
    _bias                     = bias;
    _output                   = output;
    _output_multipliers       = output_multipliers;
    _output_shifts            = output_shifts;
    _is_quantized_per_channel = output_stage.is_quantized_per_channel;

    // Check if input is a 3D reinterpretation
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->info()->num_dimensions() > 1
                                   && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();

    // Set the arguments to pass at compile time
    CLBuildOptions build_opts;

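    // The kernel fuses the quantization-offset corrections with the output stage in a single pass:
    // out = output_stage(mm_result + a_offset * vector_sum_col + b_offset * vector_sum_row + a_offset * b_offset * k [+ bias]).
    // The constant term a_offset * b_offset * k is precomputed here and passed to the kernel as K_OFFSET.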
    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
        build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
    }
    // If b_offset == 0, vector_sum_row can be a nullptr
    build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
    build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
    build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(1)));
    build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(2)));
    build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
    build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
    build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
    build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
    build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
    build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));

    PixelValue min_val{};
    PixelValue max_val{};
    std::tie(min_val, max_val) = get_min_max(output->info()->data_type());
    build_opts.add_option_if((min > min_val.get<int32_t>()), "-DMIN_BOUND=" + support::cpp11::to_string(min));
    build_opts.add_option_if((max < max_val.get<int32_t>()), "-DMAX_BOUND=" + support::cpp11::to_string(max));

    std::string kernel_name("gemmlowp_offset_contribution");
    kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(),
                                                    vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                    vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                    bias != nullptr ? bias->info() : nullptr,
                                                    output->info(),
                                                    a_offset, b_offset, output_stage,
                                                    output_multipliers->info(), output_shifts->info()); // NOLINT
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name + "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
}

Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
                                                               const ITensorInfo *output, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                               const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(),
                                                              vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
                                                              vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
                                                              bias != nullptr ? bias->clone().get() : nullptr,
                                                              output->clone().get(),
                                                              a_offset, b_offset, output_stage,
                                                              output_multipliers->clone().get(), output_shifts->clone().get())
                                .first); // NOLINT

    return Status{};
}

void CLGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice     = collapsed.first_slice_window_3D();

    // Set window for vector_sum_col
    Window win_vector_sum_col = slice;
    win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

    // Set window for vector_sum_row
    Window win_vector_sum_row = slice;
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

    Window biases_slice = slice;
    biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
    biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));

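    // Arguments must be added in the exact order expected by the OpenCL kernel; the optional tensors
    // are appended only when present, matching the compile-time options set in configure().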
    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _mm_result, slice);
        add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
        add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
        add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
        add_3D_tensor_argument(idx, _output, slice);
        add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_multipliers, biases_slice);
        add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_shifts, biases_slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute