/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"

#include "src/core/NEON/kernels/assembly/arm_gemm.hpp"

#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"

#include "src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
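/** Map an ACL ActivationLayerInfo to the equivalent arm_gemm::Activation descriptor */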
arm_gemm::Activation map_to_arm_gemm_activation(const ActivationLayerInfo &act)
{
    arm_gemm::Activation gemm_act;

    // Early exit in case lower bound is other than 0, as it's not yet supported
    if(act.b() != 0.f)
    {
        return gemm_act;
    }

    switch(act.activation())
    {
        case ActivationLayerInfo::ActivationFunction::RELU:
            gemm_act.type = arm_gemm::Activation::Type::ReLU;
            break;
        case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
            gemm_act.type   = arm_gemm::Activation::Type::BoundedReLU;
            gemm_act.param1 = act.a();
            gemm_act.param2 = 0.f;
            break;
        case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
            gemm_act.type   = arm_gemm::Activation::Type::BoundedReLU;
            gemm_act.param1 = act.a();
            gemm_act.param2 = act.b();
            break;
        default:
            gemm_act.type = arm_gemm::Activation::Type::None;
    }

    return gemm_act;
}

template <typename TypeInput, typename TypeOutput>
class FallbackTransform : public ITransformWeights
{
public:
    FallbackTransform() noexcept {};
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    FallbackTransform(const FallbackTransform &) = delete;
    /** Default move constructor */
    FallbackTransform(FallbackTransform &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    FallbackTransform &operator=(const FallbackTransform &) = delete;
    /** Default move assignment operator */
    FallbackTransform &operator=(FallbackTransform &&) = default;
    void run() override
    {
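        // Allocate the transformed-weights buffer and fill it with the assembly kernel's pretransposed copy of B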
        _output.allocator()->allocate();
        ARM_COMPUTE_ERROR_ON(_output.buffer() == nullptr);
        _gemm_kernel_asm->pretranspose_B_array(_output.buffer(), _in1_ptr, _ldb, _multi_stride_b);
        _reshape_run = true;
    }

    void release() override
    {
        _output.allocator()->free();
    }

    ITensor *get_weights() override
    {
        return &_output;
    }

    uint32_t uid() override
    {
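        // Use the pretranspose buffer size with the most significant bit set as the unique id of this transform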
        uint32_t id = (_B_pretranspose_size | 0x80000000);
        return id;
    }

    void configure(size_t B_pretranspose_size, unsigned int alignment)
    {
        _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
        _B_pretranspose_size = B_pretranspose_size;
    }

    void set_pretranspose(ITensor *tensor)
    {
        if(!_reshape_run)
        {
            _gemm_kernel_asm->set_pretransposed_B_data(tensor->buffer());
        }
    }

    void set_args(const int ldb, const TypeInput *in1_ptr, const int multi_stride_b, std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> gemm_kernel_asm)
    {
        _ldb             = ldb;
        _in1_ptr         = in1_ptr;
        _multi_stride_b  = multi_stride_b;
        _gemm_kernel_asm = gemm_kernel_asm;
    }

private:
    Tensor           _output{};
    int              _ldb{};
    const TypeInput *_in1_ptr{};
    int              _multi_stride_b{};
    size_t           _B_pretranspose_size{};
    std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
};

/** Fallback in case ACL doesn't have a function */
template <typename TypeInput, typename TypeOutput, class OutputStage = arm_gemm::Nothing>
class Fallback : public NEGEMMAssemblyDispatch::IFallback
{
public:
    /** Destructor */
    ~Fallback()
    {
        // Release memory if we have allocated the memory ourselves
        if(_pretranspose && !(_weights_manager && _weights_manager->are_weights_managed(_b)))
        {
            delete _pretranspose;
        }
    }

    /** Initialise the function's input and output.
     *
     * @param[in]  a               Input tensor containing the Matrix A.
     * @param[in]  b               Input tensor containing the Matrix B.
     * @param[in]  c               Input tensor containing the Matrix C.
     * @param[out] d               Output tensor to store the result of matrix multiplication.
     * @param[in]  args            Matrix multiplication information.
     * @param[in]  gemm_info       GEMM meta-data
     * @param[in]  memory_group    Memory group to be used by the function.
     * @param[in]  weights_manager Weights manager to be used by the function.
     * @param[in]  os              Output stage meta-data.
     */
    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d,
                   arm_gemm::GemmArgs args, const GEMMInfo &gemm_info,
                   MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os = {});

    /** Set requantization data to be used
     *
     * @param[in] shifts      Requantization shifts
     * @param[in] multipliers Requantization multipliers
     *
     * @return A tuple with a flag indicating whether left shifts are required, followed by pointers to the left-shift, right-shift and multiplier data respectively
     */
    std::tuple<bool, const int32_t *, const int32_t *, const int32_t *> set_requantize_data(const std::vector<int32_t> &shifts,
                                                                                            const std::vector<int32_t> &multipliers);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;
    bool is_configured() const override;

private:
    /** Allocate a workspace tensor.
     *
     * @param[in] workspace_size Size to allocate.
     * @param[in] memory_group   Tensor memory group.
     * @param[in] alignment      Workspace memory alignment.
     */
    void allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment);

    /** Assembly Gemm kernel */
    std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
    /** Optimised NEON kernel */
    std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
    /** Input A */
    const ITensor *_a
    {
        nullptr
    };
    /** Input B */
    const ITensor *_b
    {
        nullptr
    };
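    /** Input C */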
    const ITensor *_c
    {
        nullptr
    };
    /** Output */
    ITensor *_d{ nullptr };
    /** GEMM workspace */
    Tensor _workspace{};
    /** Pre-transpose tensor */
    ITensor *_pretranspose{ nullptr };
    /** Prepared flag */
    bool _is_prepared{ false };
    /** GEMM meta-data */
    GEMMInfo _gemm_info{};
    /** Weights manager */
    IWeightsManager *_weights_manager{ nullptr };
    /** Weights transform object */
    FallbackTransform<TypeInput, TypeOutput> _weights_transform{};
    /** GEMM kernel description */
    arm_gemm::KernelDescription _kernel_info{};
    /** Per channel quantization shifts */
    std::vector<int32_t> _shifts{};
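    /** Negated per channel shifts, split into right (non-positive) and left (non-negative) components */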
    std::vector<int32_t> right_shifts{};
    std::vector<int32_t> left_shifts{};
    /** Per channel quantization multipliers */
    std::vector<int32_t> _multipliers{};
};

template <typename TypeInput, typename TypeOutput, class OutputStage>
std::tuple<bool, const int32_t *, const int32_t *, const int32_t *> Fallback<TypeInput, TypeOutput, OutputStage>::set_requantize_data(const std::vector<int32_t> &shifts,
                                                                                                                                      const std::vector<int32_t> &multipliers)
{
    _multipliers   = multipliers;
    _shifts        = shifts;
    bool need_left = false;
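    // Flip the sign of the shifts and split them into left (non-negative) and right (non-positive) components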
    for(const auto s : _shifts)
    {
        left_shifts.push_back(std::max(-s, int32_t(0)));
        right_shifts.push_back(std::min(-s, int32_t(0)));
        if(s < 0 && !need_left)
        {
            need_left = true;
        }
    }
    return std::make_tuple(need_left, left_shifts.data(), right_shifts.data(), _multipliers.data());
}

template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d,
                                                             arm_gemm::GemmArgs args, const GEMMInfo &gemm_info,
                                                             MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os)
{
    arm_gemm::GemmConfig gemm_cfg;
    _kernel_info     = arm_gemm::get_gemm_method<TypeInput, TypeOutput, OutputStage>(args, os);
    _weights_manager = weights_manager;
    if(_kernel_info.method != arm_gemm::GemmMethod::GEMV_BATCHED)
    {
        gemm_cfg.filter = _kernel_info.name;
        args._cfg       = &gemm_cfg;
    }
    _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput, OutputStage>(args, os);
    if(_gemm_kernel_asm == nullptr)
    {
        // Configuration not supported: leave the function unconfigured
        return;
    }

    // arm_compute wrapper for the Gemm object (see above)
    std::unique_ptr<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>> acl_gemm_wrapper = support::cpp14::make_unique<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>>();
    ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
    acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
    const size_t workspace_size = _gemm_kernel_asm->get_working_size();
    if(workspace_size > 0)
    {
        // Allocate workspace
        const unsigned int alignment = 4096;
        allocate_workspace(workspace_size, memory_group, alignment);
    }

    // If the block below is removed, ConvLayer deadlocks when threads > 1 and
    // the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
    {
        const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
        if(window_size < static_cast<unsigned int>(args._maxthreads))
        {
            _gemm_kernel_asm->set_nthreads(window_size);
        }
    }

    _optimised_kernel = std::move(acl_gemm_wrapper);
    _a                = a;
    _b                = b;
    _c                = c;
    _d                = d;
    _gemm_info        = gemm_info;
    // Check for pre-transposed support
    if(_gemm_kernel_asm->B_pretranspose_required())
    {
        // Forcing 128-byte alignment (required by 32-bit kernels)
        const unsigned int alignment           = 128;
        const size_t       B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size();
        if(weights_manager && _weights_manager->are_weights_managed(b))
        {
            _weights_transform.configure(B_pretranspose_size, alignment);
            _pretranspose = _weights_manager->acquire(b, &_weights_transform);
        }
        else
        {
            _pretranspose = new Tensor();
            static_cast<Tensor *>(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
        }
    }
}

template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::prepare()
{
    if(!_is_prepared)
    {
        // Set up the matrix bias in the assembly kernel; it's just a pointer to matrix C.
        if(_c && _c->info()->data_type() == DataType::S32)
        {
            _gemm_kernel_asm->set_quantized_bias(reinterpret_cast<const int32_t *>(_c->buffer() + _c->info()->offset_first_element_in_bytes()), 0);
        }

        // Pretranspose B if required
        if(_gemm_kernel_asm->B_pretranspose_required())
        {
            const int  ldb            = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
            const auto in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
            const int  multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);

            if(_weights_manager && _weights_manager->are_weights_managed(_b))
            {
                _weights_transform.set_args(ldb, in1_ptr, multi_stride_b, _gemm_kernel_asm);
                _weights_manager->run(_b, &_weights_transform);

                // If we didn't run the reshape function, set the pretransposed buffer
                if(!_weights_transform.is_reshape_run())
                {
                    _weights_transform.set_pretranspose(_pretranspose);
                }
            }
            else
            {
                static_cast<Tensor *>(_pretranspose)->allocator()->allocate();
                ARM_COMPUTE_ERROR_ON(_pretranspose->buffer() == nullptr);
                _gemm_kernel_asm->pretranspose_B_array(_pretranspose->buffer(), in1_ptr, ldb, multi_stride_b);
                _b->mark_as_unused();
            }
        }

        _is_prepared = true;
    }
}

template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment)
{
    ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
    _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
    memory_group.manage(&_workspace);
    _workspace.allocator()->allocate();
}

template <typename TypeInput, typename TypeOutput, class OutputStage>
bool Fallback<TypeInput, TypeOutput, OutputStage>::is_configured() const
{
    return _optimised_kernel != nullptr;
}

template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::run()
{
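    // Leading dimensions and strides, expressed in elements rather than bytes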
    const int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput);
    int       ldb = 0;
    const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput);

    const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d() != 0 ? 3 : 2;
    const size_t a_multi_idx = a_batch_idx + 1;
    const size_t d_batch_idx = _gemm_info.depth_output_gemm3d() != 0 ? 3 : 2;
    const size_t d_multi_idx = d_batch_idx + 1;

    const int batch_stride_a = _a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput);
    const int batch_stride_d = _d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput);

    const int multi_stride_a = _a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput);
    int       multi_stride_b = 0;
    const int multi_stride_d = _d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput);

    const auto       in0_ptr = reinterpret_cast<const TypeInput *>(_a->buffer() + _a->info()->offset_first_element_in_bytes());
    const TypeInput *in1_ptr = nullptr;
    auto             out_ptr = reinterpret_cast<TypeOutput *>(_d->buffer() + _d->info()->offset_first_element_in_bytes());

    // Check if B is pre-transposed and de-reference it if not
    if(!_gemm_kernel_asm->B_is_pretransposed())
    {
        ldb            = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
        multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);
        in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
    }

    // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
    if(_workspace.buffer() != nullptr)
    {
        _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(_workspace.buffer()));
        const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
        unsigned int       num_threads = NEScheduler::get().num_threads();
        if(window_size < num_threads)
        {
            num_threads = window_size;
            _gemm_kernel_asm->set_nthreads(num_threads);
        }
    }

    // Prepare assembly kernel
    prepare();

    TypeOutput *bias = nullptr;
    // Set up the matrix bias in the assembly kernel; it's just a pointer to matrix C.
    if(_c && _c->info()->data_type() != DataType::S32)
    {
        bias = reinterpret_cast<TypeOutput *>(_c->buffer() + _c->info()->offset_first_element_in_bytes());
    }
    // Set gemm parameters
    _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a,
                                 in1_ptr, ldb, multi_stride_b,
                                 out_ptr, ldd, batch_stride_d, multi_stride_d,
                                 bias, 0);
    // Schedule assembly kernel
    IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX);
    if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && _d->info()->data_type() == DataType::F32)
    {
        const int granule_threshold = 200;
        scheduling_hint             = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold);
    }
    else if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (_d->info()->data_type() == DataType::F32 || _d->info()->data_type() == DataType::F16
                                                                                 || _d->info()->data_type() == DataType::U8 || _d->info()->data_type() == DataType::S8))
    {
        // GEMM_INTERLEAVED_2D supports 2D parallelism; IScheduler::split_dimensions_all signals the scheduler to parallelise over all window dimensions
        const int granule_threshold = 200;
        scheduling_hint             = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
    }
    else if(_kernel_info.method == arm_gemm::GemmMethod::QUANTIZE_WRAPPER_2D && (_d->info()->data_type() == DataType::QASYMM8 || _d->info()->data_type() == DataType::QASYMM8_SIGNED))
    {
        // Special case for QASYMM8 to support 2D parallelism; the scheduler here may be tuned differently from the FP32 case
        const int granule_threshold = 200;
        scheduling_hint             = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
    }

    NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
}

template <typename TypeInput, typename TypeOutput>
void create_arm_gemm(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group,
                     const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const GEMMInfo &gemm_info,
                     IWeightsManager *weights_manager)
{
    INEGEMMWrapperKernel::Params p           = INEGEMMWrapperKernel::extract_parameters(a, b, d, gemm_info);
    const CPUInfo               &ci          = NEScheduler::get().cpu_info();
    unsigned int                 num_threads = NEScheduler::get().num_threads();

    arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, activation, num_threads);

    // Create arm_gemm fallback
    auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput>>();
    fallback->configure(a, b, c, d, args, gemm_info, memory_group, weights_manager);
    arm_gemm = std::move(fallback);
}

template <typename TypeInput, typename TypeOutput>
void create_arm_gemm_quant(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group,
                           const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const GEMMInfo &gemm_info,
                           IWeightsManager *weights_manager)
{
    ARM_COMPUTE_UNUSED(activation);
    INEGEMMWrapperKernel::Params p           = INEGEMMWrapperKernel::extract_parameters(a, b, d, gemm_info);
    const CPUInfo               &ci          = NEScheduler::get().cpu_info();
    unsigned int                 num_threads = NEScheduler::get().num_threads();

    arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, activation, num_threads);

    // Create arm_gemm fallback
    auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();

    // Configure requantization info
    const int32_t                 a_offset = -a->info()->quantization_info().uniform().offset;
    const int32_t                 b_offset = -b->info()->quantization_info().uniform().offset;
    const GEMMLowpOutputStageInfo os_info  = gemm_info.gemmlowp_output_stage();

    arm_gemm::Requantize32 gemm_requant_info{};
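    // More than one shift means per-channel requantization; otherwise use a single per-tensor multiplier and shift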
    if(os_info.gemmlowp_shifts.size() > 1)
    {
        const auto requantize_data = fallback->set_requantize_data(os_info.gemmlowp_shifts, os_info.gemmlowp_multipliers);
        gemm_requant_info          = arm_gemm::Requantize32(nullptr, 0,
                                                            a_offset, b_offset, os_info.gemmlowp_offset,
                                                            (std::get<0>(requantize_data)) ? std::get<1>(requantize_data) : nullptr,
                                                            std::get<2>(requantize_data),
                                                            std::get<3>(requantize_data),
                                                            os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
    }
    else
    {
        gemm_requant_info = arm_gemm::Requantize32(nullptr, 0,
                                                   a_offset, b_offset, os_info.gemmlowp_offset,
                                                   -os_info.gemmlowp_shift, os_info.gemmlowp_multiplier,
                                                   os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
    }

    // Configure fallback
    fallback->configure(a, b, c, d, args, gemm_info, memory_group, weights_manager, gemm_requant_info);
    arm_gemm = std::move(fallback);
}

} //namespace

NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _arm_gemm(nullptr), _memory_group(std::move(memory_manager)), _weights_manager(weights_manager)
{
}

Status NEGEMMAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(c);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);

    ARM_COMPUTE_RETURN_ERROR_ON(!gemm_info.pretranpose_B());
#ifndef __aarch64__
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->element_size() == 1, "8bit integer types only supported for aarch64");
#endif /* __aarch64__ */
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S8,
                                                         DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S8,
                                                         DataType::BFLOAT16, DataType::F16, DataType::F32);
    if(is_data_type_quantized_per_channel(b->data_type()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8_SIGNED, DataType::S8);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 && d->data_type() != DataType::F32, "Only F32 output supported for BFLOAT16 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::QASYMM8, "Only QASYMM8 output supported for QASYMM8 input");
    return Status{};
}

bool NEGEMMAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation)
{
    arm_gemm::Activation act = map_to_arm_gemm_activation(activation);
    return act.type != arm_gemm::Activation::Type::None;
}

void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    arm_gemm::Activation act = map_to_arm_gemm_activation(gemm_info.activation_info());

    // If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
    if(!NEGEMMAssemblyDispatch::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, d->info(), gemm_info))
    {
        return;
    }

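    // Pick the assembly implementation based on the input data type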
    switch(a->info()->data_type())
    {
        case DataType::F32:
            create_arm_gemm<float, float>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            break;
#ifdef __aarch64__
        case DataType::U8:
        case DataType::QASYMM8:
            if(d->info()->data_type() == DataType::S32)
            {
                create_arm_gemm<uint8_t, uint32_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            }
            else
            {
                create_arm_gemm_quant<uint8_t, uint8_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            }
            break;
        case DataType::S8:
        case DataType::QASYMM8_SIGNED:
            if(d->info()->data_type() == DataType::S32)
            {
                create_arm_gemm<int8_t, int32_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            }
            else
            {
                create_arm_gemm_quant<int8_t, int8_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            }
            break;
#endif /* __aarch64__ */
#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
        case DataType::BFLOAT16:
            create_arm_gemm<bfloat16, float>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            break;
#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            create_arm_gemm<float16_t, float16_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        default:
            break;
    }
}

void NEGEMMAssemblyDispatch::prepare()
{
    ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
    _arm_gemm->prepare();
}

bool NEGEMMAssemblyDispatch::is_configured() const
{
    return _arm_gemm != nullptr && _arm_gemm->is_configured();
}

void NEGEMMAssemblyDispatch::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);

    ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
    _arm_gemm->run();
}
} //namespace arm_compute