blob: 6551518bc2bb09b3ba44c423b92457804ee4e719 [file] [log] [blame]
Anthony Barbier71d9b572018-07-06 17:05:59 +01001/*
Georgios Pinitas7cd26d42019-01-09 18:35:17 +00002 * Copyright (c) 2018-2019 ARM Limited.
Anthony Barbier71d9b572018-07-06 17:05:59 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
25
Anthony Barbiereaefd002018-07-20 17:49:35 +010026#include "arm_compute/core/CPP/Validate.h"
Anthony Barbier71d9b572018-07-06 17:05:59 +010027#include "arm_compute/runtime/NEON/NEScheduler.h"
Anthony Barbierc8e84b52018-07-17 16:48:42 +010028#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
Anthony Barbier71d9b572018-07-06 17:05:59 +010029
Anthony Barbiereaefd002018-07-20 17:49:35 +010030#include <arm_neon.h>
31
Anthony Barbierc8e84b52018-07-17 16:48:42 +010032namespace arm_compute
33{
Anthony Barbiereaefd002018-07-20 17:49:35 +010034namespace
Anthony Barbier71d9b572018-07-06 17:05:59 +010035{
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010036arm_gemm::Activation map_to_arm_gemm_activation(const ActivationLayerInfo &act)
Anthony Barbiereaefd002018-07-20 17:49:35 +010037{
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010038 arm_gemm::Activation gemm_act;
39
40 // Early exit in case lower bound is other than 0, as it's not yet supported
41 if(act.b() != 0.f)
Anthony Barbierc8e84b52018-07-17 16:48:42 +010042 {
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010043 return gemm_act;
Anthony Barbierc8e84b52018-07-17 16:48:42 +010044 }
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010045
46 switch(act.activation())
47 {
48 case ActivationLayerInfo::ActivationFunction::RELU:
49 gemm_act.type = arm_gemm::Activation::Type::ReLU;
50 break;
51 case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
52 gemm_act.type = arm_gemm::Activation::Type::BoundedReLU;
53 gemm_act.param1 = act.a();
54 gemm_act.param2 = 0.f;
55 break;
56 case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
57 gemm_act.type = arm_gemm::Activation::Type::BoundedReLU;
58 gemm_act.param1 = act.a();
59 gemm_act.param2 = act.b();
60 break;
61 default:
62 gemm_act.type = arm_gemm::Activation::Type::None;
63 }
64
65 return gemm_act;
Anthony Barbierc8e84b52018-07-17 16:48:42 +010066}
67
Michalis Spyrou1a569a32019-09-10 17:20:34 +010068template <typename TypeInput, typename TypeOutput>
69class FallbackTransform : public ITransformWeights
70{
71public:
Michalis Spyrou5cb49dc2019-12-03 13:42:25 +000072 FallbackTransform() noexcept {};
73 /** Prevent instances of this class from being copied (As this class contains pointers) */
74 FallbackTransform(const FallbackTransform &) = delete;
75 /** Default move constructor */
76 FallbackTransform(FallbackTransform &&) = default;
77 /** Prevent instances of this class from being copied (As this class contains pointers) */
78 FallbackTransform &operator=(const FallbackTransform &) = delete;
79 /** Default move assignment operator */
80 FallbackTransform &operator=(FallbackTransform &&) = default;
81 void run() override
Michalis Spyrou1a569a32019-09-10 17:20:34 +010082 {
83 _output.allocator()->allocate();
84 ARM_COMPUTE_ERROR_ON(_output.buffer() == nullptr);
85 _gemm_kernel_asm->pretranspose_B_array(_output.buffer(), _in1_ptr, _ldb, _multi_stride_b);
86 _reshape_run = true;
87 }
88
89 void release() override
90 {
91 _output.allocator()->free();
92 }
93
94 ITensor *get_weights() override
95 {
96 return &_output;
97 }
98
99 uint32_t uid() override
100 {
101 uint32_t id = (_B_pretranspose_size | 0x80000000);
102 return id;
103 }
104
105 void configure(size_t B_pretranspose_size, unsigned int alignment)
106 {
107 _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
108 _B_pretranspose_size = B_pretranspose_size;
109 }
110
111 void set_pretranspose(ITensor *tensor)
112 {
113 if(!_reshape_run)
114 {
115 _gemm_kernel_asm->set_pretransposed_B_data(tensor->buffer());
116 }
117 }
118
119 void set_args(const int ldb, const TypeInput *in1_ptr, const int multi_stride_b, std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> gemm_kernel_asm)
120 {
121 _ldb = ldb;
122 _in1_ptr = in1_ptr;
123 _multi_stride_b = multi_stride_b;
124 _gemm_kernel_asm = gemm_kernel_asm;
125 }
126
127private:
128 Tensor _output{};
129 int _ldb{};
130 const TypeInput *_in1_ptr{};
131 int _multi_stride_b{};
132 size_t _B_pretranspose_size{};
133 std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
134};
135
/** Fallback in case ACL doesn't have a function: wraps an arm_gemm assembly kernel. */
template <typename TypeInput, typename TypeOutput, class OutputStage = arm_gemm::Nothing>
class Fallback : public NEGEMMAssemblyDispatch::IFallback
{
public:
    /** Destructor.
     *
     * _pretranspose is only owned by this object when the weights manager did
     * not hand it out (see configure()); in that case it was allocated with
     * `new Tensor()` here and must be deleted here.
     */
    ~Fallback()
    {
        // Release memory if we have allocated the memory ourselves
        if(_pretranspose && !(_weights_manager && _weights_manager->are_weights_managed(_b)))
        {
            delete _pretranspose;
        }
    }

    /** Initialise the functions's input and output.
     *
     * @param[in]  a               Input tensor containing the Matrix A.
     * @param[in]  b               Input tensor containing the Matrix B.
     * @param[in]  c               Input tensor containing the Matrix C.
     * @param[out] d               Output tensor to store the result of matrix multiplication.
     * @param[in]  args            Matrix multiplication information.
     * @param[in]  gemm_info       GEMM meta-data
     * @param[in]  memory_group    Memory group to be used by the function.
     * @param[in]  weights_manager Weights manager to be used by the function.
     * @param[in]  os              Output stage meta-data.
     */
    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d,
                   arm_gemm::GemmArgs args, const GEMMInfo &gemm_info,
                   MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os = {});

    // Inherited methods overridden:
    void run() override;
    void prepare() override;
    bool is_configured() const override;

private:
    /** Allocate a workspace tensor.
     *
     * @param[in] workspace_size Size to allocate.
     * @param[in] memory_group   Tensor memory group.
     * @param[in] alignment      Workspace memory alignment.
     */
    void allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment);

    /** Assembly Gemm kernel (shared with _weights_transform for pretranspose) */
    std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
    /** Optimised NEON kernel */
    std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
    /** Input A */
    const ITensor *_a
    {
        nullptr
    };
    /** Input B */
    const ITensor *_b
    {
        nullptr
    };
    /** Input C (bias); may be nullptr — see prepare()/run() */
    const ITensor *_c
    {
        nullptr
    };
    /** Output */
    ITensor *_d{ nullptr };
    /** GEMM workspace */
    Tensor _workspace{};
    /** Pre-transpose tensor: owned here unless managed by _weights_manager (see dtor) */
    ITensor *_pretranspose{ nullptr };
    /** Prepared flag */
    bool _is_prepared{ false };
    /** GEMM meta-data */
    GEMMInfo _gemm_info{};
    /** Weights manager (non-owning, may be nullptr) */
    IWeightsManager *_weights_manager{ nullptr };
    /** Weights transform object */
    FallbackTransform<TypeInput, TypeOutput> _weights_transform{};
    /** GEMM kernel description */
    arm_gemm::KernelDescription _kernel_info{};
};
Anthony Barbier71d9b572018-07-06 17:05:59 +0100216
template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d,
                                                             arm_gemm::GemmArgs args, const GEMMInfo &gemm_info,
                                                             MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os)
{
    // Select the arm_gemm method first so its name can be used as a kernel filter below.
    arm_gemm::GemmConfig gemm_cfg;
    _kernel_info     = arm_gemm::get_gemm_method<TypeInput, TypeOutput, OutputStage>(args, os);
    _weights_manager = weights_manager;
    if(_kernel_info.method != arm_gemm::GemmMethod::GEMV_BATCHED)
    {
        // Pin kernel selection to the chosen method by name.
        gemm_cfg.filter = _kernel_info.name;
        args._cfg       = &gemm_cfg;
    }
    _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput, OutputStage>(args, os);
    if(_gemm_kernel_asm == nullptr)
    {
        //configuration not supported: Leave function unconfigured:
        return;
    }

    // arm_compute wrapper for the Gemm object (see above)
    std::unique_ptr<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>> acl_gemm_wrapper = support::cpp14::make_unique<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>>();
    ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
    acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
    const size_t workspace_size = _gemm_kernel_asm->get_working_size();
    if(workspace_size > 0)
    {
        // Allocate workspace
        const unsigned int alignment = 4096;
        allocate_workspace(workspace_size, memory_group, alignment);
    }

    //if we disable this code below in brackets then ConvLayer deadlocks when threads > 1 and
    //the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
    {
        // Don't use more threads than the kernel has parallel work for.
        const int window_size = _gemm_kernel_asm->get_window_size();
        if(window_size < args._maxthreads)
        {
            _gemm_kernel_asm->set_nthreads(window_size);
        }
    }

    _optimised_kernel = std::move(acl_gemm_wrapper);
    _a                = a;
    _b                = b;
    _c                = c;
    _d                = d;
    _gemm_info        = gemm_info;
    // Check for pre-transposed support
    if(_gemm_kernel_asm->B_pretranspose_required())
    {
        // Forcing 128-byte alignment (required by 32-bit kernels)
        const unsigned int alignment           = 128;
        const size_t       B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size();
        if(weights_manager && _weights_manager->are_weights_managed(b))
        {
            // Managed path: the weights manager owns the pretransposed tensor.
            _weights_transform.configure(B_pretranspose_size, alignment);
            _pretranspose = _weights_manager->acquire(b, &_weights_transform);
        }
        else
        {
            // Unmanaged path: we own the tensor and delete it in the destructor.
            _pretranspose = new Tensor();
            static_cast<Tensor *>(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
        }
    }
}
283
template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::prepare()
{
    // One-shot preparation: bias hookup and (optionally) pretransposing B.
    if(!_is_prepared)
    {
        // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C.
        // (S32 bias is handled here; non-S32 bias is passed at run() time instead.)
        if(_c && _c->info()->data_type() == DataType::S32)
        {
            _gemm_kernel_asm->set_quantized_bias(reinterpret_cast<const int32_t *>(_c->buffer() + _c->info()->offset_first_element_in_bytes()), 0);
        }

        // Pretranspose B if required
        if(_gemm_kernel_asm->B_pretranspose_required())
        {
            // Strides are converted from bytes to elements of TypeInput.
            const int  ldb            = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
            const auto in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
            const int  multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);

            if(_weights_manager && _weights_manager->are_weights_managed(_b))
            {
                // Managed path: delegate the pretranspose to the weights manager.
                _weights_transform.set_args(ldb, in1_ptr, multi_stride_b, _gemm_kernel_asm);
                _weights_manager->run(_b, &_weights_transform);

                // If we didn't run the reshape function, set the pretransposed buffer
                if(!_weights_transform.is_reshape_run())
                {
                    _weights_transform.set_pretranspose(_pretranspose);
                }
            }
            else
            {
                // Unmanaged path: allocate our own buffer, pretranspose into it,
                // then mark B as unused so its backing memory can be reclaimed.
                static_cast<Tensor *>(_pretranspose)->allocator()->allocate();
                ARM_COMPUTE_ERROR_ON(_pretranspose->buffer() == nullptr);
                _gemm_kernel_asm->pretranspose_B_array(_pretranspose->buffer(), in1_ptr, ldb, multi_stride_b);
                _b->mark_as_unused();
            }
        }

        _is_prepared = true;
    }
}
325
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100326template <typename TypeInput, typename TypeOutput, class OutputStage>
327void Fallback<TypeInput, TypeOutput, OutputStage>::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment)
Anthony Barbier71d9b572018-07-06 17:05:59 +0100328{
329 ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
330 _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
Anthony Barbier20394d52018-08-02 11:29:09 +0100331 memory_group.manage(&_workspace);
Anthony Barbier71d9b572018-07-06 17:05:59 +0100332 _workspace.allocator()->allocate();
333}
334
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100335template <typename TypeInput, typename TypeOutput, class OutputStage>
336bool Fallback<TypeInput, TypeOutput, OutputStage>::is_configured() const
Anthony Barbier71d9b572018-07-06 17:05:59 +0100337{
338 return _optimised_kernel != nullptr;
339}
340
template <typename TypeInput, typename TypeOutput, class OutputStage>
void Fallback<TypeInput, TypeOutput, OutputStage>::run()
{
    // Leading dimensions in elements (byte strides divided by element size).
    const int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput);
    int       ldb = 0;
    const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput);

    // When a tensor is (re)interpreted as 3D the batch/multi dimensions shift up by one.
    const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d() != 0 ? 3 : 2;
    const size_t a_multi_idx = a_batch_idx + 1;
    const size_t d_batch_idx = _gemm_info.depth_output_gemm3d() != 0 ? 3 : 2;
    const size_t d_multi_idx = d_batch_idx + 1;

    const int batch_stride_a = _a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput);
    const int batch_stride_d = _d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput);

    const int multi_stride_a = _a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput);
    int       multi_stride_b = 0;
    const int multi_stride_d = _d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput);

    const auto       in0_ptr = reinterpret_cast<const TypeInput *>(_a->buffer() + _a->info()->offset_first_element_in_bytes());
    const TypeInput *in1_ptr = nullptr;
    auto             out_ptr = reinterpret_cast<TypeOutput *>(_d->buffer() + _d->info()->offset_first_element_in_bytes());

    // Check if B is pre-tranposed and de-reference if not
    // (if it is pretransposed, ldb/multi_stride_b/in1_ptr stay zero/null and are ignored)
    if(!_gemm_kernel_asm->B_is_pretransposed())
    {
        ldb            = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
        multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);
        in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
    }

    // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
    if(_workspace.buffer() != nullptr)
    {
        _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(_workspace.buffer()));
        const unsigned int window_size = _gemm_kernel_asm->get_window_size();
        unsigned int       num_threads = NEScheduler::get().num_threads();
        if(window_size < num_threads)
        {
            num_threads = window_size;
            _gemm_kernel_asm->set_nthreads(num_threads);
        }
    }

    // Prepare assembly kernel
    prepare();

    TypeOutput *bias = nullptr;
    // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C.
    // (S32 bias was already registered in prepare(); only non-S32 bias is passed here.)
    if(_c && _c->info()->data_type() != DataType::S32)
    {
        bias = reinterpret_cast<TypeOutput *>(_c->buffer() + _c->info()->offset_first_element_in_bytes());
    }
    // Set gemm parameters
    _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a,
                                 in1_ptr, ldb, multi_stride_b,
                                 out_ptr, ldd, batch_stride_d, multi_stride_d,
                                 bias, 0);

    // Schedule assembly kernel
    // Interleaved F32 GEMM benefits from dynamic scheduling above a granule threshold.
    IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX);
    if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && _d->info()->data_type() == DataType::F32)
    {
        const int granule_threshold = 200;
        scheduling_hint             = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold);
    }
    NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
}
409
Anthony Barbiereaefd002018-07-20 17:49:35 +0100410template <typename TypeInput, typename TypeOutput>
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100411void create_arm_gemm(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group,
412 const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const GEMMInfo &gemm_info,
413 IWeightsManager *weights_manager)
Anthony Barbiereaefd002018-07-20 17:49:35 +0100414{
Georgios Pinitas37d080f2019-06-21 18:43:12 +0100415 INEGEMMWrapperKernel::Params p = INEGEMMWrapperKernel::extract_parameters(a, b, d, gemm_info);
Anthony Barbiereaefd002018-07-20 17:49:35 +0100416 const CPUInfo &ci = NEScheduler::get().cpu_info();
417 unsigned int num_threads = NEScheduler::get().num_threads();
418
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100419 arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, gemm_info.pretranpose_B());
Anthony Barbiereaefd002018-07-20 17:49:35 +0100420
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100421 // Create arm_gemm fallback
422 auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput>>();
423 fallback->configure(a, b, c, d, args, gemm_info, memory_group, weights_manager);
424 arm_gemm = std::move(fallback);
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100425}
426
427template <typename TypeInput, typename TypeOutput>
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100428void create_arm_gemm_quant(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group,
429 const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const GEMMInfo &gemm_info,
430 IWeightsManager *weights_manager)
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100431{
432 INEGEMMWrapperKernel::Params p = INEGEMMWrapperKernel::extract_parameters(a, b, d, gemm_info);
433 const CPUInfo &ci = NEScheduler::get().cpu_info();
434 unsigned int num_threads = NEScheduler::get().num_threads();
435
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100436 arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, gemm_info.pretranpose_B());
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100437
438 // Configure requantization info
439 const int32_t a_offset = -a->info()->quantization_info().uniform().offset;
440 const int32_t b_offset = -b->info()->quantization_info().uniform().offset;
441 const GEMMLowpOutputStageInfo os_info = gemm_info.gemmlowp_output_stage();
442
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100443 const arm_gemm::ARequantizeLayer32 gemm_requant_info(nullptr, 0,
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100444 a_offset, b_offset, os_info.gemmlowp_offset,
445 -os_info.gemmlowp_shift, os_info.gemmlowp_multiplier,
446 os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
447
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100448 // Create arm_gemm fallback
449 auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::ARequantizeLayer32>>();
450 fallback->configure(a, b, c, d, args, gemm_info, memory_group, weights_manager, gemm_requant_info);
451 arm_gemm = std::move(fallback);
Anthony Barbiereaefd002018-07-20 17:49:35 +0100452}
453
454} //namespace
455
/** Construct the dispatcher; the actual fallback is created lazily in configure(). */
NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _arm_gemm(nullptr), _memory_group(std::move(memory_manager)), _weights_manager(weights_manager)
{
}
460
/** Static validation of the supported data-type combinations.
 *
 * Checks nullness, architecture constraints (8-bit types need aarch64) and
 * input/output data-type pairings. Returns an error Status on the first failed
 * check, otherwise an empty (OK) Status.
 */
Status NEGEMMAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(gemm_info);
    ARM_COMPUTE_UNUSED(c);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
#ifndef __aarch64__
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 || a->data_type() == DataType::S8 || a->data_type() == DataType::QASYMM8, "8bit integer types only supported for aarch64");
#endif /* __aarch64__ */
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S8,
                                                         DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S8,
                                                         DataType::F16, DataType::F32);
    // Per-channel quantized B only pairs with signed 8-bit A; otherwise A and B types must match.
    if(is_data_type_quantized_per_channel(b->data_type()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8_SIGNED, DataType::S8);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    }
    // Allowed input -> output type pairings.
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::QASYMM8, "Only QASYMM8 output supported for QASYMM8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8_SIGNED && d->data_type() != DataType::S32, "Only S32 output supported for QASYMM8_SIGNED input");
    return Status{};
}
490
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100491bool NEGEMMAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation)
492{
493 arm_gemm::Activation act = map_to_arm_gemm_activation(activation);
494 return act.type != arm_gemm::Activation::Type::None;
495}
496
/** Create and configure the type-appropriate Fallback for the given tensors.
 *
 * Silently leaves the dispatcher unconfigured when validation fails or the
 * data type has no assembly path; callers must check via is_configured().
 */
void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    arm_gemm::Activation act = map_to_arm_gemm_activation(gemm_info.activation_info());

    //If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
    if(!NEGEMMAssemblyDispatch::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, d->info(), gemm_info))
    {
        return;
    }

    // Dispatch on the input data type; 8-bit paths only exist on aarch64 and
    // F16 only when the compiler provides FP16 vector arithmetic.
    switch(a->info()->data_type())
    {
        case DataType::F32:
            create_arm_gemm<float, float>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            break;
#ifdef __aarch64__
        case DataType::U8:
        case DataType::QASYMM8:
            // S32 output -> plain lowp GEMM; otherwise requantizing output stage.
            if(d->info()->data_type() == DataType::S32)
            {
                create_arm_gemm<uint8_t, uint32_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            }
            else
            {
                create_arm_gemm_quant<uint8_t, uint8_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            }
            break;
        case DataType::S8:
        case DataType::QASYMM8_SIGNED:
            create_arm_gemm<int8_t, int32_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            break;
#endif /* __aarch64__ */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            create_arm_gemm<float16_t, float16_t>(_arm_gemm, _memory_group, a, b, c, d, act, gemm_info, _weights_manager);
            break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        default:
            break;
    }
}
539
540void NEGEMMAssemblyDispatch::prepare()
541{
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100542 ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
543 _arm_gemm->prepare();
Anthony Barbiereaefd002018-07-20 17:49:35 +0100544}
545
546bool NEGEMMAssemblyDispatch::is_configured() const
547{
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100548 return _arm_gemm != nullptr && _arm_gemm->is_configured();
Anthony Barbiereaefd002018-07-20 17:49:35 +0100549}
550
551void NEGEMMAssemblyDispatch::run()
552{
Georgios Pinitasda953f22019-04-02 17:27:03 +0100553 MemoryGroupResourceScope scope_mg(_memory_group);
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100554
555 ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
556 _arm_gemm->run();
Anthony Barbiereaefd002018-07-20 17:49:35 +0100557}
Anthony Barbier71d9b572018-07-06 17:05:59 +0100558} //namespace arm_compute