///
/// Copyright (c) 2017-2021 Arm Limited.
///
/// SPDX-License-Identifier: MIT
///
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to
/// deal in the Software without restriction, including without limitation the
/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
/// sell copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in all
/// copies or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
///
namespace arm_compute
{
/** @page implementation_topic Implementation Topics

@section implementation_topic_windows Windows

A @ref Window represents a workload to execute, it can handle up to @ref Coordinates::num_max_dimensions dimensions.
Each dimension is defined by a start, end and step.

It can be split into sub-windows as long as *all* the following rules remain true for all the dimensions:

- max[n].start() <= sub[n].start() < max[n].end()
- sub[n].start() < sub[n].end() <= max[n].end()
- max[n].step() == sub[n].step()
- (sub[n].start() - max[n].start()) % max[n].step() == 0
- (sub[n].end() - sub[n].start()) % max[n].step() == 0

@section implementation_topic_kernels Kernels

Each implementation of the @ref IKernel interface (base class of all the kernels in the core library) works in the same way:

OpenCL kernels:

@code{.cpp}
// Initialize the CLScheduler with the default context and default command queue
// Implicitly initializes the CLKernelLibrary to use ./cl_kernels as location for OpenCL kernels files and sets a default device for which OpenCL programs are built.
CLScheduler::get().default_init();

cl::CommandQueue q = CLScheduler::get().queue();
// Create a kernel object:
MyKernel kernel;
// Initialize the kernel with the input/output and options you want to use:
kernel.configure( input, output, option0, option1);
// Retrieve the execution window of the kernel:
const Window& max_window = kernel.window();
// Run the whole kernel in the current thread:
kernel.run( q, max_window ); // Enqueue the kernel to process the full window on the default queue

// Wait for the processing to complete:
q.finish();
@endcode

Neon / CPP kernels:

@code{.cpp}
// Create a kernel object:
MyKernel kernel;
// Initialize the kernel with the input/output and options you want to use:
kernel.configure( input, output, option0, option1);
// Retrieve the execution window of the kernel:
const Window& max_window = kernel.window();
// Run the whole kernel in the current thread:
kernel.run( max_window ); // Run the kernel on the full window
@endcode

@section implementation_topic_multithreading Multi-threading

The previous section shows how to run an Arm® Neon™ / CPP kernel in the current thread, however if your system has several CPU cores, you will probably want the kernel to use several cores. Here is how this can be done:

@code{.cpp}
    ThreadInfo info;
    info.cpu_info = &_cpu_info;

    const Window &max_window = kernel->window();
    const unsigned int num_iterations = max_window.num_iterations(split_dimension);
    info.num_threads = std::min(num_iterations, _num_threads);

    if(num_iterations == 0)
    {
        return;
    }

    if(!kernel->is_parallelisable() || info.num_threads == 1)
    {
        kernel->run(max_window, info);
    }
    else
    {
        int t = 0;
        auto thread_it = _threads.begin();

        for(; t < info.num_threads - 1; ++t, ++thread_it)
        {
            Window win = max_window.split_window(split_dimension, t, info.num_threads);
            info.thread_id = t;
            thread_it->start(kernel, win, info);
        }

        // Run last part on main thread
        Window win = max_window.split_window(split_dimension, t, info.num_threads);
        info.thread_id = t;
        kernel->run(win, info);

        try
        {
            for(auto &thread : _threads)
            {
                thread.wait();
            }
        }
        catch(const std::system_error &e)
        {
            std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
        }
    }
@endcode

This is a very basic implementation which was originally used in the Arm® Neon™ runtime library by all the Arm® Neon™ functions.

@sa CPPScheduler

@note Some kernels need some local temporary buffer to perform their calculations. In order to avoid memory corruption between threads, the local buffer must be of size: ```memory_needed_per_thread * num_threads``` and a unique thread_id between 0 and num_threads must be assigned to the @ref ThreadInfo object passed to the ```run``` function.


@section implementation_topic_cl_scheduler OpenCL kernel library

All OpenCL kernels used by the library are built and stored in @ref CLKernelLibrary.
If the library is compiled with embed_kernels=0, the application can set the path to the OpenCL kernels by calling @ref CLKernelLibrary::init(); by default the path is set to "./cl_kernels".
*/
} // namespace arm_compute