COMPMID-2177 Fix clang warnings

Mechanical cleanups flagged by clang/clang-tidy: replace raw C arrays and
unique_ptr<T[]> members with std::array and std::vector, pass shared_ptr
parameters by const reference, take range-for variables by const reference,
isolate multi-variable declarations and value-initialise locals, prefer
emplace_back over push_back, and mark unavoidable HWCAP macro definitions
with NOLINT.
Change-Id: I78039db8c58d7b14a042c41e54c25fb9cb509bf7
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1092
Reviewed-by: VidhyaSudhan Loganathan <vidhyasudhan.loganathan@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/runtime/BlobLifetimeManager.cpp b/src/runtime/BlobLifetimeManager.cpp
index c5d42b1..1323bb3 100644
--- a/src/runtime/BlobLifetimeManager.cpp
+++ b/src/runtime/BlobLifetimeManager.cpp
@@ -66,7 +66,7 @@
std::vector<BlobInfo> group_sizes;
std::transform(std::begin(_free_blobs), std::end(_free_blobs), std::back_inserter(group_sizes), [](const Blob & b)
{
- return BlobInfo(b.max_size, b.max_alignment);
+ return BlobInfo{ b.max_size, b.max_alignment };
});
// Update blob sizes
@@ -75,7 +75,7 @@
group_sizes.resize(max_size);
std::transform(std::begin(_blobs), std::end(_blobs), std::begin(group_sizes), std::begin(_blobs), [](BlobInfo lhs, BlobInfo rhs)
{
- return BlobInfo(std::max(lhs.size, rhs.size), std::max(lhs.alignment, rhs.alignment));
+ return BlobInfo{ std::max(lhs.size, rhs.size), std::max(lhs.alignment, rhs.alignment) };
});
// Calculate group mappings
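
Note on the BlobInfo change above: braced initialization is the clang-tidy
preferred form and, unlike the old parenthesised call, rejects narrowing
conversions. A minimal sketch with a hypothetical stand-in type, assuming
BlobInfo carries a size and an alignment:

    #include <cstddef>

    // Hypothetical stand-in for BlobInfo.
    struct BlobInfoSketch
    {
        std::size_t size;
        std::size_t alignment;
    };

    BlobInfoSketch ok{ 16U, 8U };    // braced init; no narrowing allowed
    // BlobInfoSketch bad{ -1, 8 }; // ill-formed: int -> size_t narrows
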
diff --git a/src/runtime/CL/CLHelpers.cpp b/src/runtime/CL/CLHelpers.cpp
index 533e6fa..8bc7b8e 100644
--- a/src/runtime/CL/CLHelpers.cpp
+++ b/src/runtime/CL/CLHelpers.cpp
@@ -47,7 +47,7 @@
* @return A pointer to the context properties which can be used to create an opencl context
*/
-void initialise_context_properties(const cl::Platform &platform, const cl::Device &device, cl_context_properties prop[7])
+void initialise_context_properties(const cl::Platform &platform, const cl::Device &device, std::array<cl_context_properties, 7> &prop)
{
ARM_COMPUTE_UNUSED(device);
#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
@@ -55,7 +55,7 @@
if(arm_compute::device_supports_extension(device, "cl_arm_printf"))
{
// Create a cl_context with a printf_callback and user specified buffer size.
- cl_context_properties properties_printf[] =
+ std::array<cl_context_properties, 7> properties_printf =
{
CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platform()),
// Enable a printf callback function for this context.
@@ -65,17 +65,17 @@
CL_PRINTF_BUFFERSIZE_ARM, 0x1000,
0
};
- std::copy_n(properties_printf, 7, prop);
+ prop = properties_printf;
}
else
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)
{
- cl_context_properties properties[] =
+ std::array<cl_context_properties, 3> properties =
{
CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platform()),
0
};
- std::copy_n(properties, 3, prop);
+ std::copy(properties.begin(), properties.end(), prop.begin());
};
}
} //namespace
@@ -94,11 +94,11 @@
std::vector<cl::Device> platform_devices;
p.getDevices(CL_DEVICE_TYPE_DEFAULT, &platform_devices);
ARM_COMPUTE_ERROR_ON_MSG(platform_devices.size() == 0, "Couldn't find any OpenCL device");
- device = platform_devices[0];
- cl_int err = CL_SUCCESS;
- cl_context_properties properties[7] = { 0, 0, 0, 0, 0, 0, 0 };
+ device = platform_devices[0];
+ cl_int err = CL_SUCCESS;
+ std::array<cl_context_properties, 7> properties = { 0, 0, 0, 0, 0, 0, 0 };
initialise_context_properties(p, device, properties);
- cl::Context cl_context = cl::Context(device, properties, nullptr, nullptr, &err);
+ cl::Context cl_context = cl::Context(device, properties.data(), nullptr, nullptr, &err);
ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Failed to create OpenCL context");
return std::make_tuple(cl_context, device, err);
}
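
The CLHelpers change is the recurring std::array-at-a-C-boundary pattern: the
buffer is a checked, fixed-size container on the C++ side, and .data() hands a
raw pointer to the OpenCL API. A condensed sketch, assuming the standard
OpenCL C++ wrapper header:

    #include <array>
    #include <CL/cl2.hpp>

    cl::Context make_context_sketch(const cl::Platform &platform, const cl::Device &device)
    {
        // Zero-initialised fixed-size property list; replaces the old
        // cl_context_properties prop[7] C array.
        std::array<cl_context_properties, 7> props{};
        props[0] = CL_CONTEXT_PLATFORM;
        props[1] = reinterpret_cast<cl_context_properties>(platform());
        // props[2] stays 0 and terminates the list.

        cl_int err = CL_SUCCESS;
        // The C API still takes a raw pointer: .data() bridges the gap.
        return cl::Context(device, props.data(), nullptr, nullptr, &err);
    }
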
diff --git a/src/runtime/CL/CLMemory.cpp b/src/runtime/CL/CLMemory.cpp
index 5bea85c..557378b 100644
--- a/src/runtime/CL/CLMemory.cpp
+++ b/src/runtime/CL/CLMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@
{
}
-CLMemory::CLMemory(std::shared_ptr<ICLMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+CLMemory::CLMemory(const std::shared_ptr<ICLMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
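
The constructor change above (repeated later for Memory and GCMemory)
addresses clang-tidy's performance-unnecessary-value-param: a by-value
shared_ptr that is only copied into a member costs an extra reference-count
round trip. A sketch with a hypothetical region type:

    #include <memory>

    struct IMemoryRegionSketch { };  // hypothetical stand-in

    class MemorySketch
    {
    public:
        // const reference in, one copy into the owning member
        explicit MemorySketch(const std::shared_ptr<IMemoryRegionSketch> &memory)
            : _region_owned(memory), _region(_region_owned.get())
        {
        }

    private:
        std::shared_ptr<IMemoryRegionSketch> _region_owned;
        IMemoryRegionSketch                 *_region;
    };
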
diff --git a/src/runtime/CL/CLMultiHOG.cpp b/src/runtime/CL/CLMultiHOG.cpp
index 88d45ac..2577ec0 100644
--- a/src/runtime/CL/CLMultiHOG.cpp
+++ b/src/runtime/CL/CLMultiHOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,8 +30,9 @@
using namespace arm_compute;
CLMultiHOG::CLMultiHOG(size_t num_models)
- : _num_models(num_models), _model(arm_compute::support::cpp14::make_unique<CLHOG[]>(_num_models))
+ : _num_models(num_models), _model()
{
+ _model.resize(_num_models);
}
size_t CLMultiHOG::num_models() const
@@ -42,11 +43,11 @@
ICLHOG *CLMultiHOG::cl_model(size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
const ICLHOG *CLMultiHOG::cl_model(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
\ No newline at end of file
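
This file shows the dominant transformation in the patch: unique_ptr<T[]>
members become std::vector<T>, make_unique<T[]>(n) becomes resize(n), and
pointer arithmetic such as _model.get() + index becomes &_model[index]. The
shape of the change, with a hypothetical element type:

    #include <cstddef>
    #include <vector>

    struct ModelSketch { };  // hypothetical element type

    class MultiSketch
    {
    public:
        explicit MultiSketch(std::size_t num_models) : _model()
        {
            _model.resize(num_models);  // was: make_unique<ModelSketch[]>(num_models)
        }

        ModelSketch *model(std::size_t index)
        {
            return &_model[index];      // was: _model.get() + index
        }

    private:
        std::vector<ModelSketch> _model;
    };
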
diff --git a/src/runtime/CL/CLPyramid.cpp b/src/runtime/CL/CLPyramid.cpp
index 865f389..6d5dba0 100644
--- a/src/runtime/CL/CLPyramid.cpp
+++ b/src/runtime/CL/CLPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,7 @@
using namespace arm_compute;
CLPyramid::CLPyramid()
- : _info(), _pyramid(nullptr)
+ : _info(), _pyramid()
{
}
@@ -51,8 +51,8 @@
void CLPyramid::internal_init(const PyramidInfo &info, bool auto_padding)
{
- _info = info;
- _pyramid = arm_compute::support::cpp14::make_unique<CLTensor[]>(_info.num_levels());
+ _info = info;
+ _pyramid.resize(_info.num_levels());
size_t w = _info.width();
size_t h = _info.height();
@@ -109,11 +109,9 @@
void CLPyramid::allocate()
{
- ARM_COMPUTE_ERROR_ON(_pyramid == nullptr);
-
for(size_t i = 0; i < _info.num_levels(); ++i)
{
- (_pyramid.get() + i)->allocator()->allocate();
+ _pyramid[i].allocator()->allocate();
}
}
@@ -126,5 +124,5 @@
{
ARM_COMPUTE_ERROR_ON(index >= _info.num_levels());
- return (_pyramid.get() + index);
+ return &_pyramid[index];
}
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 2ce6455..101e4f1 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -34,7 +34,7 @@
namespace
{
-std::unique_ptr<ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
+std::unique_ptr<ICLMemoryRegion> allocate_region(const cl::Context &context, size_t size, cl_uint alignment)
{
// Try fine-grain SVM
std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(context,
diff --git a/src/runtime/CL/CLTuner.cpp b/src/runtime/CL/CLTuner.cpp
index 8f8d3e7..929def2 100644
--- a/src/runtime/CL/CLTuner.cpp
+++ b/src/runtime/CL/CLTuner.cpp
@@ -275,7 +275,7 @@
std::ofstream fs;
fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
fs.open(filename, std::ios::out);
- for(auto kernel_data : _lws_table)
+ for(auto const &kernel_data : _lws_table)
{
fs << kernel_data.first << ";" << kernel_data.second[0] << ";" << kernel_data.second[1] << ";" << kernel_data.second[2] << std::endl;
}
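
for(auto kernel_data : _lws_table) copied a pair with a std::string key on
every iteration; clang-tidy's performance-for-range-copy asks for a const
reference. A sketch, with a plain vector standing in for cl::NDRange:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
        std::map<std::string, std::vector<unsigned int>> lws_table =
        {
            { "gemm", { 4U, 4U, 1U } }
        };

        // const & binds to each entry in place; no per-iteration copy
        for(auto const &kernel_data : lws_table)
        {
            std::cout << kernel_data.first << ";" << kernel_data.second[0] << ";"
                      << kernel_data.second[1] << ";" << kernel_data.second[2] << std::endl;
        }
        return 0;
    }
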
diff --git a/src/runtime/CL/functions/CLConvolution.cpp b/src/runtime/CL/functions/CLConvolution.cpp
index 2f43ce1..f09585e 100644
--- a/src/runtime/CL/functions/CLConvolution.cpp
+++ b/src/runtime/CL/functions/CLConvolution.cpp
@@ -58,13 +58,13 @@
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(conv == nullptr);
- int16_t conv_col[matrix_size];
- int16_t conv_row[matrix_size];
- _is_separable = separate_matrix(conv, conv_col, conv_row, matrix_size);
+ std::array<int16_t, matrix_size> conv_col{ 0 };
+ std::array<int16_t, matrix_size> conv_row{ 0 };
+ _is_separable = separate_matrix(conv, conv_col.data(), conv_row.data(), matrix_size);
if(_is_separable)
{
- std::pair<DataType, DataType> type_pair = data_type_for_convolution(conv_col, conv_row, matrix_size);
+ std::pair<DataType, DataType> type_pair = data_type_for_convolution(conv_col.data(), conv_row.data(), matrix_size);
_tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), 1, type_pair.first));
// Manage intermediate buffers
@@ -75,8 +75,8 @@
scale = calculate_matrix_scale(conv, matrix_size);
}
- _kernel_hor.configure(input, &_tmp, conv_row, border_mode == BorderMode::UNDEFINED);
- _kernel_vert.configure(&_tmp, output, conv_col, scale, border_mode == BorderMode::UNDEFINED, type_pair.second);
+ _kernel_hor.configure(input, &_tmp, conv_row.data(), border_mode == BorderMode::UNDEFINED);
+ _kernel_vert.configure(&_tmp, output, conv_col.data(), scale, border_mode == BorderMode::UNDEFINED, type_pair.second);
_border_handler.configure(input, _kernel_hor.border_size(), border_mode, PixelValue(constant_border_value));
// Allocate intermediate buffer
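
matrix_size is a compile-time template parameter here, so int16_t
conv_col[matrix_size] was legal C++, but std::array keeps clang-tidy quiet and
.data() preserves the pointer-based separate_matrix/configure interfaces. A
reduced sketch with a hypothetical helper:

    #include <array>
    #include <cstdint>

    // Hypothetical stand-in for the pointer-based helper.
    inline bool separate_matrix_sketch(const int16_t *conv, int16_t *col, int16_t *row, unsigned int size)
    {
        (void)conv; (void)col; (void)row; (void)size;
        return true;
    }

    template <unsigned int matrix_size>
    void configure_sketch(const int16_t *conv)
    {
        std::array<int16_t, matrix_size> conv_col{ 0 }; // value-initialised
        std::array<int16_t, matrix_size> conv_row{ 0 };
        separate_matrix_sketch(conv, conv_col.data(), conv_row.data(), matrix_size);
    }
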
diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
index 4a5f845..f687e54 100644
--- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
@@ -36,8 +36,7 @@
using namespace arm_compute;
CLDepthConcatenateLayer::CLDepthConcatenateLayer() // NOLINT
- : _inputs_vector(),
- _concat_kernels_vector(),
+ : _concat_kernels_vector(),
_border_handlers_vector(),
_num_inputs(0)
{
@@ -53,8 +52,8 @@
inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
}
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLDepthConcatenateLayerKernel[]>(_num_inputs);
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
+ _border_handlers_vector.resize(_num_inputs);
TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ);
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 8211104..97b0a01 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -322,7 +322,8 @@
const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_output_stage_kernel.configure(&_output_reshaped, biases, output, output_multiplier, output_shift, output_quant_info.offset);
_output_reshaped.allocator()->allocate();
diff --git a/src/runtime/CL/functions/CLFFT1D.cpp b/src/runtime/CL/functions/CLFFT1D.cpp
index 67111e7..49b5a2a 100644
--- a/src/runtime/CL/functions/CLFFT1D.cpp
+++ b/src/runtime/CL/functions/CLFFT1D.cpp
@@ -62,7 +62,7 @@
// Create and configure FFT kernels
unsigned int Nx = 1;
_num_ffts = decomposed_vector.size();
- _fft_kernels = arm_compute::support::cpp14::make_unique<CLFFTRadixStageKernel[]>(_num_ffts);
+ _fft_kernels.resize(_num_ffts);
for(unsigned int i = 0; i < _num_ffts; ++i)
{
const unsigned int radix_for_stage = decomposed_vector.at(i);
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 40ce6b4..03d516f 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -372,7 +372,9 @@
const unsigned int kernel_width = weights->dimension(idx_width);
const unsigned int kernel_height = weights->dimension(idx_height);
- TensorInfo im2col_reshaped_info, info_gemm, weights_reshaped_info;
+ TensorInfo im2col_reshaped_info{};
+ TensorInfo info_gemm{};
+ TensorInfo weights_reshaped_info{};
const ITensorInfo *gemm_input_to_use = input;
const ITensorInfo *gemm_output_to_use = output;
const ITensorInfo *weights_to_use = weights;
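
Two small patterns above: readability-isolate-declaration (one variable per
declaration) and explicit value-initialisation with {} so each local starts
from a known state. In sketch form, with a hypothetical stand-in:

    // Before: TensorInfo im2col_reshaped_info, info_gemm, weights_reshaped_info;
    struct TensorInfoSketch { };  // hypothetical stand-in

    TensorInfoSketch im2col_reshaped_info{};
    TensorInfoSketch info_gemm{};
    TensorInfoSketch weights_reshaped_info{};
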
diff --git a/src/runtime/CL/functions/CLGaussianPyramid.cpp b/src/runtime/CL/functions/CLGaussianPyramid.cpp
index fd82769..b671b23 100644
--- a/src/runtime/CL/functions/CLGaussianPyramid.cpp
+++ b/src/runtime/CL/functions/CLGaussianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,10 +76,10 @@
if(num_levels > 1)
{
- _horizontal_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
- _vertical_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
- _horizontal_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidHorKernel[]>(num_levels - 1);
- _vertical_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidVertKernel[]>(num_levels - 1);
+ _horizontal_border_handler.resize(num_levels - 1);
+ _vertical_border_handler.resize(num_levels - 1);
+ _horizontal_reduction.resize(num_levels - 1);
+ _vertical_reduction.resize(num_levels - 1);
// Apply half scale to the X dimension of the tensor shape
TensorShape tensor_shape = pyramid->info()->tensor_shape();
@@ -153,8 +153,8 @@
if(num_levels > 1)
{
- _gauss5x5 = arm_compute::support::cpp14::make_unique<CLGaussian5x5[]>(num_levels - 1);
- _scale_nearest = arm_compute::support::cpp14::make_unique<CLScaleKernel[]>(num_levels - 1);
+ _gauss5x5.resize(num_levels - 1);
+ _scale_nearest.resize(num_levels - 1);
PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
diff --git a/src/runtime/CL/functions/CLHOGMultiDetection.cpp b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
index 0865f50..f799d61 100644
--- a/src/runtime/CL/functions/CLHOGMultiDetection.cpp
+++ b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
@@ -128,12 +128,11 @@
_num_block_norm_kernel = input_block_norm.size(); // Number of CLHOGBlockNormalizationKernel kernels to compute
_num_hog_detect_kernel = input_hog_detect.size(); // Number of CLHOGDetector functions to compute
- _orient_bin_kernel = arm_compute::support::cpp14::make_unique<CLHOGOrientationBinningKernel[]>(_num_orient_bin_kernel);
- _block_norm_kernel = arm_compute::support::cpp14::make_unique<CLHOGBlockNormalizationKernel[]>(_num_block_norm_kernel);
- _hog_detect_kernel = arm_compute::support::cpp14::make_unique<CLHOGDetector[]>(_num_hog_detect_kernel);
- _non_maxima_kernel = arm_compute::support::cpp14::make_unique<CPPDetectionWindowNonMaximaSuppressionKernel>();
- _hog_space = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_orient_bin_kernel);
- _hog_norm_space = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_block_norm_kernel);
+ _orient_bin_kernel.resize(_num_orient_bin_kernel);
+ _block_norm_kernel.resize(_num_block_norm_kernel);
+ _hog_detect_kernel.resize(_num_hog_detect_kernel);
+ _hog_space.resize(_num_orient_bin_kernel);
+ _hog_norm_space.resize(_num_block_norm_kernel);
// Allocate tensors for magnitude and phase
TensorInfo info_mag(shape_img, Format::S16);
@@ -172,10 +171,10 @@
_hog_space[i].allocator()->init(info_space);
// Manage intermediate buffers
- _memory_group.manage(_hog_space.get() + i);
+ _memory_group.manage(&_hog_space[i]);
// Initialise orientation binning kernel
- _orient_bin_kernel[i].configure(&_mag, &_phase, _hog_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ _orient_bin_kernel[i].configure(&_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
}
// Allocate intermediate tensors
@@ -193,10 +192,10 @@
_hog_norm_space[i].allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_hog_norm_space.get() + i);
+ _memory_group.manage(&_hog_norm_space[i]);
// Initialize block normalization kernel
- _block_norm_kernel[i].configure(_hog_space.get() + idx_orient_bin, _hog_norm_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ _block_norm_kernel[i].configure(&_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
}
// Allocate intermediate tensors
@@ -212,13 +211,13 @@
{
const size_t idx_block_norm = input_hog_detect[i];
- _hog_detect_kernel[i].configure(_hog_norm_space.get() + idx_block_norm, multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
+ _hog_detect_kernel[i].configure(&_hog_norm_space[idx_block_norm], multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
}
detection_window_strides->unmap(CLScheduler::get().queue());
// Configure non maxima suppression kernel
- _non_maxima_kernel->configure(_detection_windows, min_distance);
+ _non_maxima_kernel.configure(_detection_windows, min_distance);
// Allocate intermediate tensors
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
@@ -242,13 +241,13 @@
// Run orientation binning kernel
for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
{
- CLScheduler::get().enqueue(*(_orient_bin_kernel.get() + i), false);
+ CLScheduler::get().enqueue(_orient_bin_kernel[i], false);
}
// Run block normalization kernel
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
{
- CLScheduler::get().enqueue(*(_block_norm_kernel.get() + i), false);
+ CLScheduler::get().enqueue(_block_norm_kernel[i], false);
}
// Run HOG detector kernel
@@ -262,7 +261,7 @@
{
// Map detection windows array before computing non maxima suppression
_detection_windows->map(CLScheduler::get().queue(), true);
- Scheduler::get().schedule(_non_maxima_kernel.get(), Window::DimY);
+ Scheduler::get().schedule(&_non_maxima_kernel, Window::DimY);
_detection_windows->unmap(CLScheduler::get().queue());
}
}
diff --git a/src/runtime/CL/functions/CLHarrisCorners.cpp b/src/runtime/CL/functions/CLHarrisCorners.cpp
index 342d1ca..67f550d 100644
--- a/src/runtime/CL/functions/CLHarrisCorners.cpp
+++ b/src/runtime/CL/functions/CLHarrisCorners.cpp
@@ -55,7 +55,7 @@
_gy(),
_score(),
_nonmax(),
- _corners_list(nullptr),
+ _corners_list(),
_num_corner_candidates(0),
_corners(nullptr)
{
@@ -84,7 +84,7 @@
_score.allocator()->init(info_f32);
_nonmax.allocator()->init(info_f32);
- _corners_list = arm_compute::support::cpp14::make_unique<InternalKeypoint[]>(shape.x() * shape.y());
+ _corners_list.resize(shape.x() * shape.y());
// Manage intermediate buffers
_memory_group.manage(&_gx);
@@ -146,13 +146,13 @@
_score.allocator()->allocate();
// Init corner candidates kernel
- _candidates.configure(&_nonmax, _corners_list.get(), &_num_corner_candidates);
+ _candidates.configure(&_nonmax, _corners_list.data(), &_num_corner_candidates);
// Allocate intermediate buffers
_nonmax.allocator()->allocate();
// Init euclidean distance
- _sort_euclidean.configure(_corners_list.get(), _corners, &_num_corner_candidates, min_dist);
+ _sort_euclidean.configure(_corners_list.data(), _corners, &_num_corner_candidates, min_dist);
}
void CLHarrisCorners::run()
diff --git a/src/runtime/CL/functions/CLLaplacianPyramid.cpp b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
index 559b57f..a118518 100644
--- a/src/runtime/CL/functions/CLLaplacianPyramid.cpp
+++ b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,8 +70,8 @@
// Create Gaussian Pyramid function
_gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
- _convf = arm_compute::support::cpp14::make_unique<CLGaussian5x5[]>(_num_levels);
- _subf = arm_compute::support::cpp14::make_unique<CLArithmeticSubtraction[]>(_num_levels);
+ _convf.resize(_num_levels);
+ _subf.resize(_num_levels);
for(unsigned int i = 0; i < _num_levels; ++i)
{
diff --git a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
index 911c9b3..13116bf 100644
--- a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
+++ b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,8 +63,8 @@
_tmp_pyr.init(pyramid_info);
// Allocate add and scale functions. Level 0 does not need to be scaled.
- _addf = arm_compute::support::cpp14::make_unique<CLArithmeticAddition[]>(num_levels);
- _scalef = arm_compute::support::cpp14::make_unique<CLScale[]>(num_levels - 1);
+ _addf.resize(num_levels);
+ _scalef.resize(num_levels - 1);
const size_t last_level = num_levels - 1;
@@ -85,7 +85,7 @@
void CLLaplacianReconstruct::run()
{
- ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function");
+ ARM_COMPUTE_ERROR_ON_MSG(_addf.empty(), "Unconfigured function");
const size_t last_level = _tmp_pyr.info()->num_levels() - 1;
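
With _addf now a container rather than a unique_ptr array, the "unconfigured"
sentinel changes from a null pointer to an empty vector, hence the .empty()
check in run(). A sketch:

    #include <cassert>
    #include <vector>

    struct AddFnSketch { };  // hypothetical function object

    std::vector<AddFnSketch> addf;  // empty until configure() resizes it

    void run_sketch()
    {
        // was: ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function")
        assert(!addf.empty() && "Unconfigured function");
    }
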
diff --git a/src/runtime/CL/functions/CLOpticalFlow.cpp b/src/runtime/CL/functions/CLOpticalFlow.cpp
index 7ef1c83..a013a1f 100644
--- a/src/runtime/CL/functions/CLOpticalFlow.cpp
+++ b/src/runtime/CL/functions/CLOpticalFlow.cpp
@@ -84,12 +84,12 @@
const int old_values_list_length = list_length * window_dimension * window_dimension;
// Create kernels and tensors
- _tracker_init_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerInitKernel[]>(_num_levels);
- _tracker_stage0_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerStage0Kernel[]>(_num_levels);
- _tracker_stage1_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerStage1Kernel[]>(_num_levels);
- _func_scharr = arm_compute::support::cpp14::make_unique<CLScharr3x3[]>(_num_levels);
- _scharr_gx = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_levels);
- _scharr_gy = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_levels);
+ _tracker_init_kernel.resize(_num_levels);
+ _tracker_stage0_kernel.resize(_num_levels);
+ _tracker_stage1_kernel.resize(_num_levels);
+ _func_scharr.resize(_num_levels);
+ _scharr_gx.resize(_num_levels);
+ _scharr_gy.resize(_num_levels);
// Create internal keypoint arrays
_old_points_internal = arm_compute::support::cpp14::make_unique<CLLKInternalKeypointArray>(list_length);
@@ -118,8 +118,8 @@
_scharr_gy[i].allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_scharr_gx.get() + i);
- _memory_group.manage(_scharr_gy.get() + i);
+ _memory_group.manage(&_scharr_gx[i]);
+ _memory_group.manage(&_scharr_gy[i]);
// Init Scharr kernel
_func_scharr[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index dba7f23..99e3121 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -31,7 +31,7 @@
namespace arm_compute
{
CLPadLayer::CLPadLayer()
- : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr)
+ : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}
@@ -67,11 +67,16 @@
// Two strided slice functions will be required for each dimension padded as well as a
// concatenate function and the tensors to hold the temporary results.
- _slice_functions = arm_compute::support::cpp14::make_unique<CLStridedSlice[]>(2 * _num_dimensions);
- _slice_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(2 * _num_dimensions);
- _concat_functions = arm_compute::support::cpp14::make_unique<CLConcatenateLayer[]>(_num_dimensions);
- _concat_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_dimensions - 1);
- Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+ _slice_functions.resize(2 * _num_dimensions);
+ _slice_results.resize(2 * _num_dimensions);
+ _concat_functions.resize(_num_dimensions);
+ _concat_results.resize(_num_dimensions - 1);
+
+ Coordinates starts_before{};
+ Coordinates ends_before{};
+ Coordinates starts_after{};
+ Coordinates ends_after{};
+ Coordinates strides{};
ICLTensor *prev = input;
for(uint32_t i = 0; i < _num_dimensions; ++i)
{
diff --git a/src/runtime/CL/functions/CLReduceMean.cpp b/src/runtime/CL/functions/CLReduceMean.cpp
index 702ce34..15091f90 100644
--- a/src/runtime/CL/functions/CLReduceMean.cpp
+++ b/src/runtime/CL/functions/CLReduceMean.cpp
@@ -40,10 +40,10 @@
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- _reduction_ops = reduction_axis.num_dimensions();
- _reduction_kernels = arm_compute::support::cpp14::make_unique<CLReductionOperation[]>(_reduction_ops);
- _reduced_outs = arm_compute::support::cpp14::make_unique<CLTensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
- _keep_dims = keep_dims;
+ _reduction_ops = reduction_axis.num_dimensions();
+ _reduction_kernels.resize(_reduction_ops);
+ _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
+ _keep_dims = keep_dims;
Coordinates axis_local = reduction_axis;
const int input_dims = input->info()->num_dimensions();
@@ -57,9 +57,9 @@
// Perform reduction for every axis
for(unsigned int i = 0; i < _reduction_ops; ++i)
{
- TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+ TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : _reduced_outs[i - 1].info()->tensor_shape();
out_shape.set(axis_local[i], 1);
- auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+ auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);
if(i == _reduction_ops - 1 && keep_dims)
{
@@ -68,8 +68,8 @@
else
{
_reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
- _memory_group.manage(_reduced_outs.get() + i);
- _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], ReductionOperation::MEAN_SUM);
+ _memory_group.manage(&_reduced_outs[i]);
+ _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
}
}
@@ -92,7 +92,7 @@
out_shape.remove_dimension(axis_local[i] - i);
}
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
- _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+ _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
}
}
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index bb285d7..9f99d2d 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -71,7 +71,7 @@
else
{
// Create temporary tensor infos
- auto sums_vector = arm_compute::support::cpp14::make_unique<TensorInfo[]>(num_of_stages - 1);
+ std::vector<TensorInfo> sums_vector(num_of_stages - 1);
// Create intermediate tensor info
TensorShape shape{ input->tensor_shape() };
@@ -110,17 +110,17 @@
}
// Validate ReductionOperation only on first kernel
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, sums_vector.get(), axis, first_kernel_op));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, &sums_vector[0], axis, first_kernel_op));
// Validate ReductionOperation on intermediate stages
for(unsigned int i = 1; i < num_of_stages - 1; ++i)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + i - 1, sums_vector.get() + i, axis, intermediate_kernel_op));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[i - 1], &sums_vector[i], axis, intermediate_kernel_op));
}
// Validate ReductionOperation on the last stage
const unsigned int last_stage = num_of_stages - 1;
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + last_stage - 1, output, axis, last_kernel_op, input->dimension(0)));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[last_stage - 1], output, axis, last_kernel_op, input->dimension(0)));
}
return Status{};
@@ -133,7 +133,7 @@
_is_serial = is_data_type_quantized(input->info()->data_type()) || axis != 0;
// Configure reduction operation kernels
- _reduction_kernels_vector = arm_compute::support::cpp14::make_unique<CLReductionOperationKernel[]>(_num_of_stages);
+ _reduction_kernels_vector.resize(_num_of_stages);
// Create temporary tensors
if(_is_serial)
@@ -142,8 +142,8 @@
}
else
{
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_of_stages);
- _results_vector = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_of_stages - 1);
+ _border_handlers_vector.resize(_num_of_stages);
+ _results_vector.resize(_num_of_stages - 1);
TensorShape shape{ input->info()->tensor_shape() };
for(unsigned int i = 0; i < _num_of_stages - 1; i++)
{
@@ -152,7 +152,7 @@
}
// Apply ReductionOperation only on first kernel
- _memory_group.manage(_results_vector.get());
+ _memory_group.manage(&_results_vector[0]);
ReductionOperation first_kernel_op;
ReductionOperation intermediate_kernel_op;
@@ -183,23 +183,23 @@
ARM_COMPUTE_ERROR("Not supported");
}
- _reduction_kernels_vector[0].configure(input, _results_vector.get(), axis, first_kernel_op);
+ _reduction_kernels_vector[0].configure(input, &_results_vector[0], axis, first_kernel_op);
_border_handlers_vector[0].configure(input, _reduction_kernels_vector[0].border_size(), BorderMode::CONSTANT, pixelValue);
// Apply ReductionOperation on intermediate stages
for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
{
- _memory_group.manage(_results_vector.get() + i);
- _reduction_kernels_vector[i].configure(_results_vector.get() + i - 1, _results_vector.get() + i, axis, intermediate_kernel_op);
- _border_handlers_vector[i].configure(_results_vector.get() + i - 1, _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, pixelValue);
+ _memory_group.manage(&_results_vector[i]);
+ _reduction_kernels_vector[i].configure(&_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);
+ _border_handlers_vector[i].configure(&_results_vector[i - 1], _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[i - 1].allocator()->allocate();
}
// Apply ReductionOperation on the last stage
const unsigned int last_stage = _num_of_stages - 1;
const unsigned int input_width = input->info()->dimension(0);
- _reduction_kernels_vector[last_stage].configure(_results_vector.get() + last_stage - 1, output, axis, last_kernel_op, input_width);
- _border_handlers_vector[last_stage].configure(_results_vector.get() + last_stage - 1, _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, pixelValue);
+ _reduction_kernels_vector[last_stage].configure(&_results_vector[last_stage - 1], output, axis, last_kernel_op, input_width);
+ _border_handlers_vector[last_stage].configure(&_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[last_stage - 1].allocator()->allocate();
}
}
diff --git a/src/runtime/CL/functions/CLSplit.cpp b/src/runtime/CL/functions/CLSplit.cpp
index f084351..8d37d53 100644
--- a/src/runtime/CL/functions/CLSplit.cpp
+++ b/src/runtime/CL/functions/CLSplit.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,8 +42,8 @@
void CLSplit::configure(const ICLTensor *input, const std::vector<ICLTensor *> &outputs, unsigned int axis)
{
// Create Slice functions
- _num_outputs = outputs.size();
- _slice_functions = arm_compute::support::cpp14::make_unique<CLSlice[]>(_num_outputs);
+ _num_outputs = outputs.size();
+ _slice_functions.resize(_num_outputs);
// Get output shape
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs);
diff --git a/src/runtime/CL/functions/CLStackLayer.cpp b/src/runtime/CL/functions/CLStackLayer.cpp
index 71327fe..2700b49 100644
--- a/src/runtime/CL/functions/CLStackLayer.cpp
+++ b/src/runtime/CL/functions/CLStackLayer.cpp
@@ -46,8 +46,8 @@
void CLStackLayer::configure(const std::vector<ICLTensor *> &input, int axis, ICLTensor *output)
{
- _num_inputs = input.size();
- _stack_kernels = arm_compute::support::cpp14::make_unique<CLStackLayerKernel[]>(_num_inputs);
+ _num_inputs = input.size();
+ _stack_kernels.resize(_num_inputs);
// Wrap around negative values
const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
diff --git a/src/runtime/CL/functions/CLUnstack.cpp b/src/runtime/CL/functions/CLUnstack.cpp
index 428d091..eb1dd8c 100644
--- a/src/runtime/CL/functions/CLUnstack.cpp
+++ b/src/runtime/CL/functions/CLUnstack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,7 +74,7 @@
// Wrap around negative values
const unsigned int axis_u = wrap_axis(axis, input->info());
_num_slices = std::min(outputs_vector_info.size(), input->info()->dimension(axis_u));
- _strided_slice_vector = arm_compute::support::cpp14::make_unique<CLStridedSlice[]>(_num_slices);
+ _strided_slice_vector.resize(_num_slices);
Coordinates slice_start;
int32_t slice_end_mask;
diff --git a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
index 6e42377..a8667c3 100644
--- a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
@@ -109,7 +109,7 @@
break;
default:
// Configure generic case WidthConcatenate kernels
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLWidthConcatenateLayerKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
unsigned int width_offset = 0;
for(unsigned int i = 0; i < _num_inputs; ++i)
diff --git a/src/runtime/CL/tuners/CLLWSList.cpp b/src/runtime/CL/tuners/CLLWSList.cpp
index 97134b1..6eb2514 100644
--- a/src/runtime/CL/tuners/CLLWSList.cpp
+++ b/src/runtime/CL/tuners/CLLWSList.cpp
@@ -36,7 +36,7 @@
{
ARM_COMPUTE_ERROR_ON(index >= size());
auto coords = index2coords(search_space_shape, index);
- return cl::NDRange(coords[0] + 1, coords[1] + 1, coords[2] + 1);
+ return cl::NDRange{ coords[0] + 1U, coords[1] + 1U, coords[2] + 1U };
}
CLLWSListExhaustive::CLLWSListExhaustive(const cl::NDRange &gws)
@@ -49,7 +49,7 @@
{
ARM_COMPUTE_ERROR_ON(index >= size());
auto coords = index2coords(search_space_shape, index);
- return cl::NDRange(_lws_x[coords[0]], _lws_y[coords[1]], _lws_z[coords[2]]);
+ return cl::NDRange{ _lws_x[coords[0]], _lws_y[coords[1]], _lws_z[coords[2]] };
}
CLLWSListNormal::CLLWSListNormal(const cl::NDRange &gws)
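
The 1U in the NDRange construction keeps the index arithmetic unsigned,
avoiding a sign-conversion warning when the result widens to size_t; braced
initialization additionally rejects narrowing. A sketch with a stand-in for
the three-dimensional cl::NDRange constructor:

    #include <array>
    #include <cstddef>

    // Hypothetical stand-in for cl::NDRange(size_t, size_t, size_t).
    struct NDRangeSketch
    {
        std::size_t x;
        std::size_t y;
        std::size_t z;
    };

    NDRangeSketch make_range_sketch(const std::array<unsigned int, 3> &coords)
    {
        // unsigned + 1U stays unsigned; widening to size_t is warning-free
        return NDRangeSketch{ coords[0] + 1U, coords[1] + 1U, coords[2] + 1U };
    }
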
diff --git a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
index 79e619c..9a141cb 100644
--- a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
+++ b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
@@ -600,7 +600,7 @@
if(_info.keep_top_k() > -1 && num_det > _info.keep_top_k())
{
std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
- for(auto it : indices)
+ for(auto const &it : indices)
{
const int label = it.first;
const std::vector<int> &label_indices = it.second;
@@ -614,7 +614,7 @@
for(auto idx : label_indices)
{
ARM_COMPUTE_ERROR_ON(idx > static_cast<int>(scores.size()));
- score_index_pairs.push_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
+ score_index_pairs.emplace_back(scores[idx], std::make_pair(label, idx));
}
}
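
push_back(std::make_pair(...)) builds a temporary pair and then copies or
moves it into the vector; emplace_back can construct the element in place, and
the outer make_pair wrapper becomes unnecessary once emplace_back forwards the
arguments. A sketch:

    #include <utility>
    #include <vector>

    int main()
    {
        std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
        float score = 0.9f;
        int   label = 1;
        int   idx   = 42;

        // outer pair constructed directly inside the vector
        score_index_pairs.emplace_back(score, std::make_pair(label, idx));
        return 0;
    }
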
diff --git a/src/runtime/CPUUtils.cpp b/src/runtime/CPUUtils.cpp
index f3355a7..f7240db 100644
--- a/src/runtime/CPUUtils.cpp
+++ b/src/runtime/CPUUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,16 +54,16 @@
/* Make sure the bits we care about are defined, just in case asm/hwcap.h is
* out of date (or for bare metal mode) */
#ifndef HWCAP_ASIMDHP
-#define HWCAP_ASIMDHP (1 << 10)
-#endif /* HWCAP_ASIMDHP */
+#define HWCAP_ASIMDHP (1 << 10) // NOLINT
+#endif /* HWCAP_ASIMDHP */
#ifndef HWCAP_CPUID
-#define HWCAP_CPUID (1 << 11)
-#endif /* HWCAP_CPUID */
+#define HWCAP_CPUID (1 << 11) // NOLINT
+#endif /* HWCAP_CPUID */
#ifndef HWCAP_ASIMDDP
-#define HWCAP_ASIMDDP (1 << 20)
-#endif /* HWCAP_ASIMDDP */
+#define HWCAP_ASIMDDP (1 << 20) // NOLINT
+#endif /* HWCAP_ASIMDDP */
namespace
{
@@ -146,12 +146,12 @@
break;
}
}
- else if(implementer == 0x48) // HiSilicon CPUs
+ else if(implementer == 0x48)
{
// Only CPUs we have code paths for are detected. All other CPUs can be safely classed as "GENERIC"
switch(cpunum)
{
- case 0xd40: // A76 (Kirin 980)
+ case 0xd40: // A76
model = CPUModel::GENERIC_FP16_DOT;
break;
default:
@@ -220,8 +220,8 @@
while(bool(getline(file, line)))
{
- regmatch_t match[2];
- ret_status = regexec(&proc_regex, line.c_str(), 2, match, 0);
+ std::array<regmatch_t, 2> match;
+ ret_status = regexec(&proc_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string id = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -244,7 +244,7 @@
continue;
}
- ret_status = regexec(&imp_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&imp_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -254,7 +254,7 @@
continue;
}
- ret_status = regexec(&var_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&var_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -264,7 +264,7 @@
continue;
}
- ret_status = regexec(&part_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&part_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -274,7 +274,7 @@
continue;
}
- ret_status = regexec(&rev_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&rev_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -302,8 +302,7 @@
int get_max_cpus()
{
- int max_cpus = 1;
-#if !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__))
+ int max_cpus = 1;
std::ifstream CPUspresent;
CPUspresent.open("/sys/devices/system/cpu/present", std::ios::in);
bool success = false;
@@ -341,7 +340,6 @@
{
max_cpus = std::thread::hardware_concurrency();
}
-#endif /* BARE_METAL */
return max_cpus;
}
#endif /* !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__)) */
@@ -427,8 +425,8 @@
std::string line;
while(bool(getline(cpuinfo, line)))
{
- regmatch_t match[2];
- ret_status = regexec(&cpu_part_rgx, line.c_str(), 2, match, 0);
+ std::array<regmatch_t, 2> match;
+ ret_status = regexec(&cpu_part_rgx, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string cpu_part = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
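
The regmatch_t change is the std::array pattern at a POSIX boundary:
regexec() takes a regmatch_t*, which .data() supplies, while the declaration
itself stays a fixed-size container. A sketch, assuming a pre-compiled
regex_t:

    #include <array>
    #include <regex.h>
    #include <string>

    bool first_capture_sketch(const regex_t &rgx, const std::string &line, std::string &out)
    {
        std::array<regmatch_t, 2> match;  // was: regmatch_t match[2]
        if(regexec(&rgx, line.c_str(), match.size(), match.data(), 0) != 0)
        {
            return false;
        }
        out = line.substr(match[1].rm_so, match[1].rm_eo - match[1].rm_so);
        return true;
    }
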
diff --git a/src/runtime/Distribution1D.cpp b/src/runtime/Distribution1D.cpp
index 3431834..9e6fce4 100644
--- a/src/runtime/Distribution1D.cpp
+++ b/src/runtime/Distribution1D.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,12 +31,11 @@
using namespace arm_compute;
Distribution1D::Distribution1D(size_t num_bins, int32_t offset, uint32_t range)
- : IDistribution1D(num_bins, offset, range), _data(arm_compute::support::cpp14::make_unique<uint32_t[]>(num_bins))
+ : IDistribution1D(num_bins, offset, range), _data(num_bins)
{
}
uint32_t *Distribution1D::buffer() const
{
- ARM_COMPUTE_ERROR_ON(nullptr == _data);
- return _data.get();
+ return _data.data();
}
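
One subtlety here: buffer() is const yet returns a writable uint32_t*. On a
const unique_ptr, get() still yields T*, but std::vector::data() on a const
object yields const uint32_t*, so this compiles only if the header (not part
of this diff) marks _data mutable or relaxes const. The assumed shape:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class Distribution1DSketch
    {
    public:
        explicit Distribution1DSketch(std::size_t num_bins) : _data(num_bins) {}

        uint32_t *buffer() const
        {
            return _data.data(); // non-const data(): _data is mutable
        }

    private:
        mutable std::vector<uint32_t> _data; // assumption: declared mutable in the header
    };
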
diff --git a/src/runtime/GLES_COMPUTE/GCMemory.cpp b/src/runtime/GLES_COMPUTE/GCMemory.cpp
index fed4a15..f1457c4 100644
--- a/src/runtime/GLES_COMPUTE/GCMemory.cpp
+++ b/src/runtime/GLES_COMPUTE/GCMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@
{
}
-GCMemory::GCMemory(std::shared_ptr<IGCMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+GCMemory::GCMemory(const std::shared_ptr<IGCMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
diff --git a/src/runtime/GLES_COMPUTE/GCScheduler.cpp b/src/runtime/GLES_COMPUTE/GCScheduler.cpp
index f781273..6a39e7c 100644
--- a/src/runtime/GLES_COMPUTE/GCScheduler.cpp
+++ b/src/runtime/GLES_COMPUTE/GCScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -97,7 +97,7 @@
ARM_COMPUTE_ERROR_ON_MSG((strstr(egl_extension_st, "EGL_KHR_surfaceless_context") == nullptr), "Failed to query EGL_KHR_surfaceless_context");
ARM_COMPUTE_UNUSED(egl_extension_st);
- const EGLint config_attribs[] =
+ const std::array<EGLint, 3> config_attribs =
{
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT_KHR,
EGL_NONE
@@ -105,7 +105,7 @@
EGLConfig cfg;
EGLint count;
- res = eglChooseConfig(_display, config_attribs, &cfg, 1, &count);
+ res = eglChooseConfig(_display, config_attribs.data(), &cfg, 1, &count);
ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to choose config: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
@@ -114,7 +114,7 @@
ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to bind api: 0x%x.", eglGetError());
- const EGLint attribs[] =
+ const std::array<EGLint, 3> attribs =
{
EGL_CONTEXT_CLIENT_VERSION, 3,
EGL_NONE
@@ -122,7 +122,7 @@
_context = eglCreateContext(_display,
cfg,
EGL_NO_CONTEXT,
- attribs);
+ attribs.data());
ARM_COMPUTE_ERROR_ON_MSG(_context == EGL_NO_CONTEXT, "Failed to create context: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
diff --git a/src/runtime/HOG.cpp b/src/runtime/HOG.cpp
index 01640bb..e9f38c4 100644
--- a/src/runtime/HOG.cpp
+++ b/src/runtime/HOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,20 +29,19 @@
using namespace arm_compute;
HOG::HOG()
- : IHOG(), _info(), _descriptor(nullptr)
+ : IHOG(), _info(), _descriptor()
{
}
void HOG::init(const HOGInfo &input)
{
- ARM_COMPUTE_ERROR_ON(nullptr != _descriptor);
- _info = input;
- _descriptor = arm_compute::support::cpp14::make_unique<float[]>(_info.descriptor_size());
+ _info = input;
+ _descriptor.resize(_info.descriptor_size());
}
float *HOG::descriptor() const
{
- return _descriptor.get();
+ return _descriptor.data();
}
const HOGInfo *HOG::info() const
diff --git a/src/runtime/LutAllocator.cpp b/src/runtime/LutAllocator.cpp
index eb9051c..0db5217 100644
--- a/src/runtime/LutAllocator.cpp
+++ b/src/runtime/LutAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,23 +28,23 @@
using namespace arm_compute;
LutAllocator::LutAllocator()
- : _buffer(nullptr)
+ : _buffer()
{
}
uint8_t *LutAllocator::data() const
{
- return _buffer.get();
+ return _buffer.data();
}
void LutAllocator::allocate()
{
- _buffer = arm_compute::support::cpp14::make_unique<uint8_t[]>(size());
+ _buffer.resize(size());
}
uint8_t *LutAllocator::lock()
{
- return _buffer.get();
+ return _buffer.data();
}
void LutAllocator::unlock()
diff --git a/src/runtime/Memory.cpp b/src/runtime/Memory.cpp
index d116624..c6b956d 100644
--- a/src/runtime/Memory.cpp
+++ b/src/runtime/Memory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,8 +32,8 @@
{
}
-Memory::Memory(std::shared_ptr<IMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+Memory::Memory(const std::shared_ptr<IMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
diff --git a/src/runtime/MultiHOG.cpp b/src/runtime/MultiHOG.cpp
index e0b60b1..154bbd7 100644
--- a/src/runtime/MultiHOG.cpp
+++ b/src/runtime/MultiHOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,8 +30,9 @@
using namespace arm_compute;
MultiHOG::MultiHOG(size_t num_models)
- : _num_models(num_models), _model(arm_compute::support::cpp14::make_unique<HOG[]>(_num_models))
+ : _num_models(num_models), _model()
{
+ _model.resize(_num_models);
}
size_t MultiHOG::num_models() const
@@ -42,11 +43,11 @@
IHOG *MultiHOG::model(size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
const IHOG *MultiHOG::model(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index b8cfa2b..71af560 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -51,6 +51,7 @@
_num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info;
+ inputs_vector_info.reserve(_num_inputs);
for(unsigned int i = 0; i < _num_inputs; ++i)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
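
The added reserve() sizes the vector once before the loop, so the emplace
calls inside it never reallocate. A sketch:

    #include <vector>

    struct ITensorInfoSketch { };  // hypothetical stand-in

    void gather_infos_sketch(const std::vector<ITensorInfoSketch *> &inputs)
    {
        std::vector<ITensorInfoSketch *> infos;
        infos.reserve(inputs.size());  // single allocation up front
        for(auto *in : inputs)
        {
            infos.emplace_back(in);    // no reallocation inside the loop
        }
    }
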
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index fe1f2da..55e067f 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -35,7 +35,7 @@
{
namespace
{
-std::unique_ptr<IFunction> create_function_all_types(arm_gemm::KernelDescription gemm_kernel_info,
+std::unique_ptr<IFunction> create_function_all_types(const arm_gemm::KernelDescription &gemm_kernel_info,
const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
std::shared_ptr<IMemoryManager> memory_manager)
diff --git a/src/runtime/NEON/functions/NEHarrisCorners.cpp b/src/runtime/NEON/functions/NEHarrisCorners.cpp
index 15cecc2..3eadbee 100644
--- a/src/runtime/NEON/functions/NEHarrisCorners.cpp
+++ b/src/runtime/NEON/functions/NEHarrisCorners.cpp
@@ -90,7 +90,7 @@
_score.allocator()->init(tensor_info_score);
_nonmax.allocator()->init(tensor_info_score);
- _corners_list = arm_compute::support::cpp14::make_unique<InternalKeypoint[]>(shape.x() * shape.y());
+ _corners_list.resize(shape.x() * shape.y());
// Set/init Sobel kernel accordingly with gradient_size
switch(gradient_size)
@@ -171,13 +171,13 @@
_score.allocator()->allocate();
// Init corner candidates kernel
- _candidates.configure(&_nonmax, _corners_list.get(), &_num_corner_candidates);
+ _candidates.configure(&_nonmax, _corners_list.data(), &_num_corner_candidates);
// Allocate once all the configure methods have been called
_nonmax.allocator()->allocate();
// Init euclidean distance
- _sort_euclidean.configure(_corners_list.get(), corners, &_num_corner_candidates, min_dist);
+ _sort_euclidean.configure(_corners_list.data(), corners, &_num_corner_candidates, min_dist);
}
void NEHarrisCorners::run()
diff --git a/src/runtime/NEON/functions/NEHistogram.cpp b/src/runtime/NEON/functions/NEHistogram.cpp
index f333ecb..d56bd7c 100644
--- a/src/runtime/NEON/functions/NEHistogram.cpp
+++ b/src/runtime/NEON/functions/NEHistogram.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@
using namespace arm_compute;
NEHistogram::NEHistogram()
- : _histogram_kernel(), _local_hist(), _window_lut(arm_compute::support::cpp14::make_unique<uint32_t[]>(window_lut_default_size)), _local_hist_size(0)
+ : _histogram_kernel(), _local_hist(), _window_lut(window_lut_default_size), _local_hist_size(0)
{
}
@@ -45,10 +45,10 @@
// Allocate space for threads local histograms
_local_hist_size = output->num_bins() * NEScheduler::get().num_threads();
- _local_hist = arm_compute::support::cpp14::make_unique<uint32_t[]>(_local_hist_size);
+ _local_hist.resize(_local_hist_size);
// Configure kernel
- _histogram_kernel.configure(input, output, _local_hist.get(), _window_lut.get());
+ _histogram_kernel.configure(input, output, _local_hist.data(), _window_lut.data());
}
void NEHistogram::run()
diff --git a/src/runtime/NEON/functions/NELaplacianPyramid.cpp b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
index 0e149d4..5174a13 100644
--- a/src/runtime/NEON/functions/NELaplacianPyramid.cpp
+++ b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -92,8 +92,8 @@
// Create Gaussian Pyramid function
_gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
- _convf = arm_compute::support::cpp14::make_unique<NEGaussian5x5[]>(_num_levels);
- _subf = arm_compute::support::cpp14::make_unique<NEArithmeticSubtraction[]>(_num_levels);
+ _convf.resize(_num_levels);
+ _subf.resize(_num_levels);
for(unsigned int i = 0; i < _num_levels; ++i)
{
diff --git a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
index 9ad9689..b2d889b 100644
--- a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
+++ b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,8 +64,8 @@
_tmp_pyr.init(pyramid_info);
// Allocate add and scale functions. Level 0 does not need to be scaled.
- _addf = arm_compute::support::cpp14::make_unique<NEArithmeticAddition[]>(num_levels);
- _scalef = arm_compute::support::cpp14::make_unique<NEScale[]>(num_levels - 1);
+ _addf.resize(num_levels);
+ _scalef.resize(num_levels - 1);
const size_t last_level = num_levels - 1;
@@ -86,7 +86,7 @@
void NELaplacianReconstruct::run()
{
- ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function");
+ ARM_COMPUTE_ERROR_ON_MSG(_addf.empty(), "Unconfigured function");
const size_t last_level = _tmp_pyr.info()->num_levels() - 1;
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 6af2ee8..c608edf 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -76,8 +76,7 @@
} // namespace
NEPadLayer::NEPadLayer()
- : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr),
- _output_subtensor()
+ : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results(), _output_subtensor()
{
}
@@ -108,11 +107,16 @@
// Two strided slice functions will be required for each dimension padded as well as a
// concatenate function and the tensors to hold the temporary results.
- _slice_functions = arm_compute::support::cpp14::make_unique<NEStridedSlice[]>(2 * _num_dimensions);
- _slice_results = arm_compute::support::cpp14::make_unique<Tensor[]>(2 * _num_dimensions);
- _concat_functions = arm_compute::support::cpp14::make_unique<NEConcatenateLayer[]>(_num_dimensions);
- _concat_results = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_dimensions - 1);
- Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+ _slice_functions.resize(2 * _num_dimensions);
+ _slice_results.resize(2 * _num_dimensions);
+ _concat_functions.resize(_num_dimensions);
+ _concat_results.resize(_num_dimensions - 1);
+
+ Coordinates starts_before{};
+ Coordinates ends_before{};
+ Coordinates starts_after{};
+ Coordinates ends_after{};
+ Coordinates strides{};
ITensor *prev = input;
for(uint32_t i = 0; i < _num_dimensions; ++i)
{
@@ -158,7 +162,7 @@
if(i < prev->info()->num_dimensions())
{
_slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
- concat_vector.push_back(&_slice_results[2 * i]);
+ concat_vector.emplace_back(&_slice_results[2 * i]);
}
else
{
@@ -172,7 +176,7 @@
if(i < prev->info()->num_dimensions())
{
_slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
- concat_vector.push_back(&_slice_results[2 * i + 1]);
+ concat_vector.emplace_back(&_slice_results[2 * i + 1]);
}
else
{
diff --git a/src/runtime/NEON/functions/NEReduceMean.cpp b/src/runtime/NEON/functions/NEReduceMean.cpp
index 98d3ab9..38adaa2 100644
--- a/src/runtime/NEON/functions/NEReduceMean.cpp
+++ b/src/runtime/NEON/functions/NEReduceMean.cpp
@@ -78,10 +78,10 @@
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- _reduction_ops = reduction_axis.num_dimensions();
- _reduction_kernels = arm_compute::support::cpp14::make_unique<NEReductionOperation[]>(_reduction_ops);
- _reduced_outs = arm_compute::support::cpp14::make_unique<Tensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
- _keep_dims = keep_dims;
+ _reduction_ops = reduction_axis.num_dimensions();
+ _reduction_kernels.resize(_reduction_ops);
+ _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
+ _keep_dims = keep_dims;
Coordinates axis_local = reduction_axis;
const int input_dims = input->info()->num_dimensions();
@@ -96,9 +96,9 @@
// Perform reduction for every axis
for(unsigned int i = 0; i < _reduction_ops; ++i)
{
- TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+ TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : _reduced_outs[i - 1].info()->tensor_shape();
out_shape.set(axis_local[i], 1);
- auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+ auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);
if(i == _reduction_ops - 1 && keep_dims)
{
@@ -107,8 +107,8 @@
else
{
_reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
- _memory_group.manage(_reduced_outs.get() + i);
- _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], ReductionOperation::MEAN_SUM);
+ _memory_group.manage(&_reduced_outs[i]);
+ _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
}
}
@@ -131,7 +131,7 @@
out_shape.remove_dimension(axis_local[i] - i);
}
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
- _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+ _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
}
}
diff --git a/src/runtime/NEON/functions/NESplit.cpp b/src/runtime/NEON/functions/NESplit.cpp
index e947657..0373ab6 100644
--- a/src/runtime/NEON/functions/NESplit.cpp
+++ b/src/runtime/NEON/functions/NESplit.cpp
@@ -42,8 +42,8 @@
void NESplit::configure(const ITensor *input, const std::vector<ITensor *> &outputs, unsigned int axis)
{
// Create Slice functions
- _num_outputs = outputs.size();
- _slice_functions = arm_compute::support::cpp14::make_unique<NESlice[]>(_num_outputs);
+ _num_outputs = outputs.size();
+ _slice_functions.resize(_num_outputs);
// Get output shape
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs);
diff --git a/src/runtime/NEON/functions/NEStackLayer.cpp b/src/runtime/NEON/functions/NEStackLayer.cpp
index 2f49c22..32350b0 100644
--- a/src/runtime/NEON/functions/NEStackLayer.cpp
+++ b/src/runtime/NEON/functions/NEStackLayer.cpp
@@ -43,8 +43,8 @@
void NEStackLayer::configure(const std::vector<ITensor *> &input, int axis, ITensor *output)
{
- _num_inputs = input.size();
- _stack_kernels = arm_compute::support::cpp14::make_unique<NEStackLayerKernel[]>(_num_inputs);
+ _num_inputs = input.size();
+ _stack_kernels.resize(_num_inputs);
// Wrap around negative values
const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
diff --git a/src/runtime/NEON/functions/NEUnstack.cpp b/src/runtime/NEON/functions/NEUnstack.cpp
index 7532020..21f35f8 100644
--- a/src/runtime/NEON/functions/NEUnstack.cpp
+++ b/src/runtime/NEON/functions/NEUnstack.cpp
@@ -74,7 +74,7 @@
// Wrap around negative values
const unsigned int axis_u = wrap_axis(axis, input->info());
_num_slices = std::min(outputs_vector_info.size(), input->info()->dimension(axis_u));
- _strided_slice_vector = arm_compute::support::cpp14::make_unique<NEStridedSlice[]>(_num_slices);
+ _strided_slice_vector.resize(_num_slices);
Coordinates slice_start;
int32_t slice_end_mask;
diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
index 9fce13c..25b5216 100644
--- a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
@@ -79,7 +79,7 @@
unsigned int width_offset = 0;
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<NEWidthConcatenateLayerKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
for(unsigned int i = 0; i < _num_inputs; ++i)
{
@@ -112,6 +112,6 @@
{
for(unsigned i = 0; i < _num_inputs; ++i)
{
- NEScheduler::get().schedule(_concat_kernels_vector.get() + i, Window::DimY);
+ NEScheduler::get().schedule(&_concat_kernels_vector[i], Window::DimY);
}
}
diff --git a/src/runtime/Pyramid.cpp b/src/runtime/Pyramid.cpp
index ebd6570..bc7b550 100644
--- a/src/runtime/Pyramid.cpp
+++ b/src/runtime/Pyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,8 +45,8 @@
void Pyramid::internal_init(const PyramidInfo &info, bool auto_padding)
{
- _info = info;
- _pyramid = arm_compute::support::cpp14::make_unique<Tensor[]>(_info.num_levels());
+ _info = info;
+ _pyramid.resize(_info.num_levels());
size_t w = _info.width();
size_t h = _info.height();
@@ -56,11 +56,11 @@
TensorShape tensor_shape = _info.tensor_shape();
// Note: Look-up table used by the OpenVX sample implementation
- const float c_orbscale[4] = { 0.5f,
- SCALE_PYRAMID_ORB,
- SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB,
- SCALE_PYRAMID_ORB *SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB
- };
+ const std::array<float, 4> c_orbscale = { 0.5f,
+ SCALE_PYRAMID_ORB,
+ SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB,
+ SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB
+ };
for(size_t i = 0; i < _info.num_levels(); ++i)
{
@@ -71,7 +71,7 @@
tensor_info.auto_padding();
}
- (_pyramid.get() + i)->allocator()->init(tensor_info);
+ _pyramid[i].allocator()->init(tensor_info);
if(is_orb_scale)
{
@@ -99,11 +99,9 @@
void Pyramid::allocate()
{
- ARM_COMPUTE_ERROR_ON(_pyramid == nullptr);
-
for(size_t i = 0; i < _info.num_levels(); ++i)
{
- (_pyramid.get() + i)->allocator()->allocate();
+ _pyramid[i].allocator()->allocate();
}
}
@@ -116,5 +114,5 @@
{
ARM_COMPUTE_ERROR_ON(index >= _info.num_levels());
- return (_pyramid.get() + index);
-}
+ return &_pyramid[index];
+}
\ No newline at end of file