/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"

#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
std::unique_ptr<IFunction> create_function_all_types(const arm_gemm::KernelDescription &gemm_kernel_info,
                                                     const ITensor *a, const ITensor *b, ITensor *d,
                                                     float alpha, float beta, const GEMMInfo &gemm_info,
                                                     std::shared_ptr<IMemoryManager> memory_manager)
{
    // Note: It's safe to not check for FP16 support because this was already checked in NEGEMMAssemblyDispatch::configure()
    switch(gemm_kernel_info.method)
    {
        case arm_gemm::GemmMethod::GEMM_INTERLEAVED:
        {
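            // The interleaved ACL wrapper is only created when the caller asked for B to be
            // pretransposed; otherwise we return nullptr here so the dispatcher falls back to arm_gemm.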
            if(!gemm_info.pretranpose_B())
            {
                return nullptr;
            }
            auto function = support::cpp14::make_unique<NEGEMMInterleavedWrapper>(memory_manager);
            function->configure(a, b, d, alpha, beta, gemm_info);
            return std::move(function);
        }
#if defined(__aarch64__)
        case arm_gemm::GemmMethod::GEMM_NATIVE:
        {
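            // Only the sgemm_native_16x4 kernel has a native ACL wrapper; any other native kernel
            // selected by arm_gemm is handled through the generic fallback instead.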
            if(gemm_kernel_info.name.find("sgemm_native_16x4") != std::string::npos)
            {
                auto kernel = support::cpp14::make_unique<NEGEMMNativeWrapperKernel<float, float>>();
                kernel->configure(a, b, d, alpha, beta, gemm_info);
                auto function = support::cpp14::make_unique<NESimpleAssemblyFunction>();
                function->configure(std::move(kernel));
                return std::move(function);
            }
            return nullptr;
        }
#endif // defined(__aarch64__)
        default:
            return nullptr;
    }
}

/** Fallback in case ACL doesn't have a function */
template <typename TypeInput, typename TypeOutput>
class Fallback : public NEGEMMAssemblyDispatch::IFallback
{
public:
    /** Initialise the function's input and output.
     *
     * @param[in]  a            Input tensor containing the Matrix A.
     * @param[in]  b            Input tensor containing the Matrix B.
     * @param[out] d            Output tensor to store the result of matrix multiplication.
     * @param[in]  args         Matrix multiplication information.
     * @param[in]  gemm_info    GEMM meta-data
     * @param[in]  memory_group Memory group to be used by the function.
     */
    void configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<TypeOutput> args,
                   const GEMMInfo &gemm_info, MemoryGroup &memory_group);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;
    bool is_configured() const override;

private:
    /** Allocate a workspace tensor.
     *
     * @param[in] workspace_size Size to allocate.
     * @param[in] memory_group   Tensor memory group.
     * @param[in] alignment      Workspace memory alignment.
     */
    void allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment);

    /** Assembly Gemm kernel */
    std::unique_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
    /** Optimised NEON kernel */
    std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
    /** Input A */
    const ITensor *_a{ nullptr };
    /** Input B */
    const ITensor *_b{ nullptr };
    /** Output */
    ITensor *_d{ nullptr };
    /** GEMM workspace */
    Tensor _workspace{};
    /** Pre-transpose tensor */
    Tensor _pretranspose{};
    /** Prepared flag */
    bool _is_prepared{ false };
    /** GEMM meta-data */
    GEMMInfo _gemm_info{};
};

template <typename TypeInput, typename TypeOutput>
void Fallback<TypeInput, TypeOutput>::configure(const ITensor *a, const ITensor *b, ITensor *d, arm_gemm::GemmArgs<TypeOutput> args,
                                                const GEMMInfo &gemm_info, MemoryGroup &memory_group)
{
    arm_gemm::GemmConfig              gemm_cfg;
    const arm_gemm::KernelDescription gemm_kernel_info = arm_gemm::get_gemm_method<TypeInput, TypeOutput>(args);
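    // Pin arm_gemm's kernel selection to the method reported above via the config filter.
    // GEMV_BATCHED is left unfiltered, as it dispatches to a second kernel internally.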
    if(gemm_kernel_info.method != arm_gemm::GemmMethod::GEMV_BATCHED)
    {
        gemm_cfg.filter = gemm_kernel_info.name;
        args._cfg       = &gemm_cfg;
    }
    _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput>(args);
    if(_gemm_kernel_asm == nullptr)
    {
        //configuration not supported: Leave function unconfigured:
        return;
    }

    // arm_compute wrapper for the Gemm object (see above)
    std::unique_ptr<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>> acl_gemm_wrapper = support::cpp14::make_unique<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>>();
    ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
    acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
    const size_t workspace_size = _gemm_kernel_asm->get_working_size();
    if(workspace_size > 0)
    {
        // Allocate workspace
        const unsigned int alignment = 4096;
        allocate_workspace(workspace_size, memory_group, alignment);
    }

    //if we disable this code below in brackets then ConvLayer deadlocks when threads > 1 and
    //the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
    {
        const int window_size = _gemm_kernel_asm->get_window_size();
        if(window_size < args._maxthreads)
        {
            _gemm_kernel_asm->set_nthreads(window_size);
        }
    }

    _optimised_kernel = std::move(acl_gemm_wrapper);
    _a                = a;
    _b                = b;
    _d                = d;
    _gemm_info        = gemm_info;
    // Check for pre-transposed support
    if(_gemm_kernel_asm->B_pretranspose_required())
    {
        // Forcing 128-byte alignment (required by 32-bit kernels)
        const unsigned int alignment           = 128;
        const size_t       B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size();
        _pretranspose.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
    }
}

template <typename TypeInput, typename TypeOutput>
void Fallback<TypeInput, TypeOutput>::prepare()
{
    if(!_is_prepared)
    {
        // Pretranspose B if required
        if(_gemm_kernel_asm->B_pretranspose_required())
        {
            _pretranspose.allocator()->allocate();
            ARM_COMPUTE_ERROR_ON(_pretranspose.buffer() == nullptr);
            const int  ldb            = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
            const auto in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
            const int  multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);

            _gemm_kernel_asm->pretranspose_B_array(_pretranspose.buffer(), in1_ptr, ldb, multi_stride_b);
            _b->mark_as_unused();
        }

        _is_prepared = true;
    }
}

template <typename TypeInput, typename TypeOutput>
void Fallback<TypeInput, TypeOutput>::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment)
{
    ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
    _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
    memory_group.manage(&_workspace);
    _workspace.allocator()->allocate();
}

template <typename TypeInput, typename TypeOutput>
bool Fallback<TypeInput, TypeOutput>::is_configured() const
{
    return _optimised_kernel != nullptr;
}

template <typename TypeInput, typename TypeOutput>
void Fallback<TypeInput, TypeOutput>::run()
{
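    // Leading dimensions are expressed in elements, so the byte strides are divided by the element size.
    // ldb stays 0 here and is only filled in below when B has not been pretransposed.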
    const int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput);
    int       ldb = 0;
    const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput);

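    // The batch dimension normally sits at index 2; when the input is reinterpreted as 3D
    // (or the output is a 3D GEMM) it moves to index 3. The multi-GEMM dimension always
    // follows one index after the batch dimension.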
    const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d() != 0 ? 3 : 2;
    const size_t a_multi_idx = a_batch_idx + 1;
    const size_t d_batch_idx = _gemm_info.depth_output_gemm3d() != 0 ? 3 : 2;
    const size_t d_multi_idx = d_batch_idx + 1;

    const int batch_stride_a = _a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput);
    const int batch_stride_d = _d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput);

    const int multi_stride_a = _a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput);
    int       multi_stride_b = 0;
    const int multi_stride_d = _d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput);

    const auto       in0_ptr = reinterpret_cast<const TypeInput *>(_a->buffer() + _a->info()->offset_first_element_in_bytes());
    const TypeInput *in1_ptr = nullptr;
    auto             out_ptr = reinterpret_cast<TypeOutput *>(_d->buffer() + _d->info()->offset_first_element_in_bytes());

    // Check if B is pre-transposed and de-reference if not
    if(!_gemm_kernel_asm->B_is_pretransposed())
    {
        ldb            = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
        multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);
        in1_ptr        = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
    }

    // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
    if(_workspace.buffer() != nullptr)
    {
        _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(_workspace.buffer()));
        const unsigned int window_size = _gemm_kernel_asm->get_window_size();
        unsigned int       num_threads = NEScheduler::get().num_threads();
        if(window_size < num_threads)
        {
            num_threads = window_size;
            _gemm_kernel_asm->set_nthreads(num_threads);
        }
    }

    // Prepare assembly kernel
    prepare();

    // Set gemm parameters
    _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a, in1_ptr, ldb, multi_stride_b, out_ptr, ldd, batch_stride_d, multi_stride_d);

    // Schedule assembly kernel
    NEScheduler::get().schedule(_optimised_kernel.get(), Window::DimX);
}

template <typename TypeInput, typename TypeOutput>
void create_function_or_arm_gemm(std::unique_ptr<IFunction> &acl_function,
                                 std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm,
                                 MemoryGroup &memory_group, const ITensor *a, const ITensor *b,
                                 ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info,
                                 std::shared_ptr<IMemoryManager> memory_manager)
{
    INEGEMMWrapperKernel::Params p  = INEGEMMWrapperKernel::extract_parameters(a, b, d, gemm_info);
    const CPUInfo               &ci = NEScheduler::get().cpu_info();
    unsigned int num_threads        = NEScheduler::get().num_threads();

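    // Pack the GEMM problem description (sizes, batches, multis, scalars, thread count and the
    // B-pretranspose hint) into the argument structure arm_gemm uses for kernel selection.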
    arm_gemm::GemmArgs<TypeOutput> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, gemm_info.pretranpose_B());

    //Try to create an ACL function:
    acl_function = create_function_all_types(arm_gemm::get_gemm_method<TypeInput, TypeOutput>(args), a, b, d, alpha, beta, gemm_info, std::move(memory_manager));

    //If we still don't have an ACL function:
    if(acl_function == nullptr)
    {
        //Fallback onto arm_gemm function if ACL doesn't support this method.
        auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput>>();
        fallback->configure(a, b, d, args, gemm_info, memory_group);
        arm_gemm = std::move(fallback);
    }
}

} //namespace

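// Minimal caller-side sketch of how this dispatcher is typically driven (illustrative only,
// assuming a caller that already owns the a/b/d tensors and a memory manager):
//
//   NEGEMMAssemblyDispatch asm_glue(memory_manager);
//   asm_glue.configure(&a, &b, &d, 1.f, 0.f, GEMMInfo());
//   if(asm_glue.is_configured())
//   {
//       asm_glue.prepare(); // one-off work such as pretransposing B
//       asm_glue.run();
//   }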
NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
    : _function(nullptr), _arm_gemm(nullptr), _memory_group(memory_manager), _memory_manager(memory_manager)
{
}

Status NEGEMMAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha);
    ARM_COMPUTE_UNUSED(beta);
    ARM_COMPUTE_UNUSED(gemm_info);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
#ifndef __aarch64__
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 || a->data_type() == DataType::S8 || a->data_type() == DataType::QASYMM8, "8bit integer types only supported for aarch64");
#endif /* __aarch64__ */
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::F32, DataType::U8, DataType::QASYMM8, DataType::S8, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::S32 && d->data_type() != DataType::U32, "Only U32/S32 output supported for QASYMM8 input");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input");
    return Status{};
}

void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a);
    ARM_COMPUTE_ERROR_ON_NULLPTR(b);
    ARM_COMPUTE_ERROR_ON_NULLPTR(d);

    //If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
    if(!NEGEMMAssemblyDispatch::validate(a->info(), b->info(), d->info(), alpha, beta, gemm_info))
    {
        return;
    }

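    // Dispatch on the input data type: each supported (input, output) type pair maps to its own
    // instantiation of create_function_or_arm_gemm, which builds either an ACL function or an
    // arm_gemm fallback.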
    switch(a->info()->data_type())
    {
        case DataType::F32:
            create_function_or_arm_gemm<float, float>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, gemm_info, _memory_manager);
            break;
#ifdef __aarch64__
        case DataType::U8:
        case DataType::QASYMM8:
            create_function_or_arm_gemm<uint8_t, uint32_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, gemm_info, _memory_manager);
            break;
        case DataType::S8:
            create_function_or_arm_gemm<int8_t, int32_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, gemm_info, _memory_manager);
            break;
#endif /* __aarch64__ */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            create_function_or_arm_gemm<float16_t, float16_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, gemm_info, _memory_manager);
            break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        default:
            break;
    }
}

void NEGEMMAssemblyDispatch::prepare()
{
    if(_function != nullptr)
    {
        _function->prepare();
    }
    else
    {
        ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
        _arm_gemm->prepare();
    }
}

bool NEGEMMAssemblyDispatch::is_configured() const
{
    return (_arm_gemm != nullptr && _arm_gemm->is_configured()) || _function != nullptr;
}

void NEGEMMAssemblyDispatch::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);
    if(_function != nullptr)
    {
        _function->run();
    }
    else
    {
        ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
        _arm_gemm->run();
    }
}
} //namespace arm_compute