/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "support/ToolchainSupport.h"

#include "src/core/NEON/kernels/winograd/winograd_shim_nchw.hpp"

using T = winograd_shim_nchw::Winograd2x2_3x3GEMM<float, float>;
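// T is the underlying Winograd F(2x2, 3x3) GEMM-based convolver, specialised here for float
// inputs and outputs on NCHW tensors. The F(2x2, 3x3) transform works on 4x4 input tiles, so
// the transformed domain has 16 positions and the convolution reduces to 16 independent
// batched GEMMs; these are the GEMMs scheduled across threads in NEWinogradLayerKernel::run().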

namespace arm_compute
{
// Private implementation (pimpl) of Winograd3x3F32, holding the underlying convolver instance.
class Winograd3x3F32::Private
{
public:
    Private(const KernelShape &kernel_shape, const Tensor4DShape input_shape, const PaddingType padding_type, void *kernel_storage)
        : convolver(kernel_shape, input_shape, padding_type, kernel_storage)
    {
    }

    T convolver;
};

Winograd3x3F32::~Winograd3x3F32()
{
}

// The methods below are thin wrappers that forward to the convolver held by the pimpl.

void Winograd3x3F32::nchw2nhwc(const Tensor4DShape &input_shape, const PaddingType padding_type, void *working_space, const void *const input)
{
    _pimpl->convolver.nchw2nhwc(input_shape, padding_type, working_space, reinterpret_cast<const float *>(input));
}

void Winograd3x3F32::nhwc2nchw(const Tensor4DShape &input_shape, const PaddingType padding_type, void *working_space, void *const output)
{
    _pimpl->convolver.nhwc2nchw(input_shape, padding_type, working_space, reinterpret_cast<float *const>(output));
}

void Winograd3x3F32::transform_weights(const void *const kernel, void *transform_working_space)
{
    _pimpl->convolver.transform_weights(reinterpret_cast<const float *>(kernel), transform_working_space);
}

void Winograd3x3F32::reshape_input(const Tensor4DShape &input_shape, const PaddingType padding_type, const void *const input, void *working_space)
{
    _pimpl->convolver.reshape_input(input_shape, padding_type, reinterpret_cast<const float *>(input), working_space);
}

void Winograd3x3F32::reshape_output(const Tensor4DShape &input_shape, const PaddingType padding_type, void *const output)
{
#if defined(__aarch64__)
    _pimpl->convolver.reshape_output(input_shape, padding_type, reinterpret_cast<float *const>(output));
#else  /* __aarch64__ */
    ARM_COMPUTE_UNUSED(input_shape);
    ARM_COMPUTE_UNUSED(padding_type);
    ARM_COMPUTE_UNUSED(output);
    ARM_COMPUTE_ERROR("Not implemented");
#endif /* __aarch64__ */
}

std::pair<void *, void *> Winograd3x3F32::get_nhwc_ptrs(const Tensor4DShape &input_shape, const PaddingType padding_type, void *working_space)
{
    return _pimpl->convolver.get_nhwc_ptrs(input_shape, padding_type, working_space);
}

Winograd3x3F32::Winograd3x3F32(const KernelShape &kernel_shape, const Tensor4DShape input_shape, const PaddingType padding_type, void *kernel_storage)
    : _pimpl(support::cpp14::make_unique<Private>(kernel_shape, input_shape, padding_type, kernel_storage))
{
}

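/* A rough sketch of how these entry points are intended to be driven (the actual
 * orchestration is done by the NEWinogradLayer function, which lives outside this file;
 * the buffers and shapes named below are illustrative only):
 *
 *   const size_t kernel_size  = NEWinogradLayerKernel::get_kernel_storage_size(kernel_shape);
 *   const size_t working_size = NEWinogradLayerKernel::get_working_space_size(input_shape, kernel_shape, padding);
 *
 *   Winograd3x3F32 conv(kernel_shape, input_shape, padding, kernel_storage);
 *   conv.transform_weights(weights, transform_working_space);
 *   conv.reshape_input(input_shape, padding, input, working_space);
 *   // ... NEWinogradLayerKernel::run() then executes the 16 batched GEMMs ...
 *   conv.reshape_output(input_shape, padding, output);
 */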
size_t NEWinogradLayerKernel::get_kernel_storage_size(const KernelShape &shape)
{
    return T::get_kernel_storage_size(shape);
}

size_t NEWinogradLayerKernel::get_working_space_size(const Tensor4DShape &input_shape, const KernelShape &k_shape, const PaddingType padding)
{
    return T::get_working_space_size(input_shape, k_shape, padding);
}

size_t NEWinogradLayerKernel::get_kernel_transform_working_size(const KernelShape &shape)
{
    return T::get_kernel_transform_working_size(shape);
}

NEWinogradLayerKernel::NEWinogradLayerKernel()
    : _convolver(nullptr), _output(nullptr)
{
}

void NEWinogradLayerKernel::configure(ITensor *output, Winograd3x3F32 *convolver)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
    _convolver = convolver;
    // The window only satisfies the INEKernel contract; run() ignores it and partitions the
    // work by GEMM index instead.
    Window win = calculate_max_window(*output->info());
    INEKernel::configure(win);
}

void NEWinogradLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(window);
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(info.num_threads < 1);
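    // The Winograd F(2x2, 3x3) transform yields 16 batched GEMMs in total. The lines below
    // split them as evenly as possible over at most 16 threads, with the last thread picking
    // up any remainder. For example, with 4 threads each thread runs 4 GEMMs (thread 3 takes
    // GEMMs 12..15); with 3 threads the split is 5/5/6, because the last thread always runs
    // up to GEMM 15.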
    const size_t tid                  = info.thread_id;
    const size_t num_threads          = std::min(info.num_threads, 16);
    const size_t num_gemms_per_thread = 16 / num_threads;
    const size_t first_gemm           = tid * num_gemms_per_thread;
    const size_t last_gemm            = (tid == (num_threads - 1)) ? 15 : first_gemm + num_gemms_per_thread - 1;
    _convolver->_pimpl->convolver.execute(first_gemm, last_gemm);
}
} // namespace arm_compute