/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"

#include "NEGEMMInterleavedStrategies.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/WindowIterator.h"

namespace arm_compute
{
template <typename To, typename Tr, bool use_dot>
void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker,
                                                                                const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params &params, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads)
{
    using strategy = typename Kernel<To, use_dot>::strategy;

    _prepared_a         = prepared_a;
    _transformed_b      = transformed_b;
    _tmp_c              = tmp_c;
    _c                  = c;
    _block_walker       = block_walker;
    _block_sizes        = block_sizes;
    _params             = params;
    _b_is_pretransposed = b_is_pretransposed;
    _alpha              = alpha;
    _beta               = beta;

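    // tmp_c is per-thread scratch storage: one row of x_block * out_height() elements
    // per thread, holding a block's partial results until they are merged into C.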
    auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads }));
}

template <typename To, typename Tr, bool use_dot>
void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset,
                                                                                const Coordinates &end_offset)
{
    using strategy = typename Kernel<To, use_dot>::strategy;

    strategy           strat(info.cpu_info);
    TensorAccessor<To> prepared_a(*_prepared_a);
    TensorAccessor<To> transformed_b(*_transformed_b);
    TensorAccessor<Tr> c(*_c);
    TensorAccessor<Tr> tmp_c(*_tmp_c);

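    // Iterate over the (M, batch) window: id.x() walks M in steps of out_height(),
    // id.y() selects the batch. The pointer into A is advanced manually and reset
    // whenever a new batch starts.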
    int prev_batch = -1;
    To *a_ptr      = nullptr;
    auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
    {
        const unsigned int y     = id.x();
        const unsigned int batch = id.y();
        const unsigned int ymax  = std::min(_params.M, y + strategy::out_height());

        // If it's the first block of a new batch then reset the pointer to A.
        if(prev_batch != static_cast<int>(batch))
        {
            const unsigned int first_m = id.x();
            a_ptr                      = prepared_a(0, first_m, batch);
            prev_batch                 = batch;
        }

        // Call matrix multiply assembly routine to process the block:
        strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k);
        // Advance to the next interleaved block of A.
        a_ptr += strategy::out_height() * wl._kern_k;

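        // For the first K block (k0 == 0) beta scales the original contents of C;
        // for later K blocks the merge must accumulate, so beta is forced to 1.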
        // Merge the result with the other blocks' results:
        strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast<Tr>(1)));
    });
    auto on_new_row_size = [&](unsigned int start, unsigned int end)
    {
        // Nothing to do
    };
    window_iterator.iterate_2D(on_new_row_size);
}

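// Illustrative sketch of how a scheduler typically drives this wrapper (not part
// of this file; 'wrapper', 'info' and the window/offset arguments stand in for
// whatever the caller provides):
//
//   std::vector<MatrixMultiplyWorkload> workloads;
//   wrapper.create_workloads(workloads); // One workload per (x0, k0, multi) block
//   for(const auto &wl : workloads)
//   {
//       // Any thread may execute any workload; info.thread_id selects the
//       // thread's private row of tmp_c.
//       wrapper.transform(wl, info, batch_window, start_offset, end_offset);
//   }
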
template <typename To, typename Tr, bool use_dot>
void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::create_workloads(std::vector<MatrixMultiplyWorkload> &workloads)
{
    using strategy = typename Kernel<To, use_dot>::strategy;

    unsigned int offset_transformed_b = 0;
    unsigned int wl_index             = 0;
    unsigned int num_buffers = 0, reshaped_block_size = 0;

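    // When B is reshaped on the fly, the transformed blocks cycle through a ring of
    // num_buffers buffers of reshaped_block_size elements each; when B is
    // pretransposed, every block has its own offset in one contiguous buffer.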
    if(!_b_is_pretransposed)
    {
        num_buffers         = _transformed_b->info()->tensor_shape()[1];
        reshaped_block_size = _transformed_b->info()->tensor_shape()[0];
    }
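    // Walk the block space: id.x() steps over N, id.y() over K and id.z() over the
    // "multi" dimension, producing one workload per block.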
    execute_window_loop(_block_walker, [&](const Coordinates & id)
    {
        const unsigned int x0    = id.x();
        const unsigned int k0    = id.y();
        const unsigned int multi = id.z();

        const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N);
        const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K);

        // Figure out how many "K" the kernel will actually process (rounded up to the strategy's unroll factor).
        const int kern_k  = ceil_to_multiple(kmax - k0, strategy::k_unroll());
        // Number of out_width()-wide blocks needed to cover [x0, xmax).
        const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width());

        workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks));

        if(_b_is_pretransposed)
        {
            offset_transformed_b += bblocks * strategy::out_width() * kern_k;
        }
        else
        {
            // Rotate through the BufferManager's buffers:
            wl_index++;
            offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
        }
    });
}

// TODO: regroup somewhere?
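// Explicit instantiations for the supported data type combinations; the variants
// instantiated with 'true' take the use_dot (dot-product) code path.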
template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<float, float>;
#ifdef __aarch64__
template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<uint8_t, uint32_t>;
template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<int8_t, int32_t>;
template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<uint8_t, uint32_t, true>;
template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<int8_t, int32_t, true>;
#endif /* __aarch64__ */

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<float16_t, float16_t>;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute