/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/CLFunctions.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "utils/Utils.h"

#include <algorithm>
#include <cassert>
#include <cctype>
#include <chrono>
#include <iostream>
#include <string>
using namespace arm_compute;
using namespace utils;

namespace
{
} // namespace

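/** Example demonstrating how the compiled OpenCL program cache can be saved to a file and restored on a later run */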
class CLCacheExample : public Example
{
public:
    CLCacheExample() = default;

    bool do_setup(int argc, char **argv) override
    {
        std::cout << "Once the program has run and created the file cache.bin, rerun with --restore_cache." << std::endl;
        CLScheduler::get().default_init();
        auto start_time = std::chrono::high_resolution_clock::now();
        if(argc > 1)
        {
            std::string argv1 = argv[1];
            std::transform(argv1.begin(), argv1.end(), argv1.begin(), ::tolower);
            if(argv1 == "--restore_cache")
            {
                // Load the precompiled kernels from a file into the kernel library so that, the next
                // time they are needed, no compilation is required.
                restore_program_cache_from_file();
            }
            else
            {
                std::cout << "Unknown option " << argv1 << std::endl;
            }
        }

        // Initialise shapes
        init_tensor(TensorShape(8U, 4U, 2U), tensor_nchw, DataType::U8, DataLayout::NCHW);
        init_tensor(TensorShape(2U, 8U, 4U), tensor_nhwc, DataType::U8, DataLayout::NHWC);
        init_tensor(TensorShape(8U, 4U, 2U), tensor_nchw_result, DataType::U8, DataLayout::NCHW);

        // Create the permutation vector to turn an NCHW tensor into an NHWC one.
        // The input tensor is NCHW, so its fastest changing coordinate is W=8U.
        // For permutation vectors the fastest changing coordinate is likewise the leftmost element.
        // Each element of the permutation vector tells the function which source dimension the corresponding
        // destination dimension takes its data from, so putting 2U in the first element moves the channels
        // to the fastest changing coordinate of the destination tensor.

        const PermutationVector vector_nchw_to_nhwc(2U, 0U, 1U);
        permute_nhwc.configure(&tensor_nchw, &tensor_nhwc, vector_nchw_to_nhwc);
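        // As an illustration of the mapping above (derived from the shapes used in this example):
        // destination dimension i takes source dimension perm[i], so applying (2U, 0U, 1U) to the
        // NCHW shape (8U, 4U, 2U) gives (2U, 8U, 4U), which is exactly the NHWC shape that
        // tensor_nhwc was initialised with.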

        // Allocate and fill tensors
        tensor_nhwc.allocator()->allocate();
        tensor_nchw.allocator()->allocate();
        fill_tensor(tensor_nchw);

        // Demonstrate autoconfigure for the output tensor
        const PermutationVector vector_nhwc_to_nchw(1U, 2U, 0U);
        permute_nchw.configure(&tensor_nhwc, &tensor_nchw_result, vector_nhwc_to_nchw);
        tensor_nchw_result.allocator()->allocate();

        auto end_time        = std::chrono::high_resolution_clock::now();
        auto time_elapsed    = end_time - start_time;
        auto time_elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(time_elapsed).count();
        std::cout << "Configuration time " << time_elapsed_ms << " ms " << std::endl;
        // Save the compiled OpenCL kernels to a file (cache.bin) so that a later run with --restore_cache can reload them
        save_program_cache_to_file();

        return true;
    }
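    /** Run the two permute functions configured in do_setup() */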
    void do_run() override
    {
        permute_nhwc.run();
        permute_nchw.run();
    }
    void do_teardown() override
    {
    }

private:
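    /** Compare two tensors element by element, asserting that every pair of values matches.
     *
     * Kept as a reference helper; it is not called anywhere in this example.
     *
     * @param[in] reference Reference tensor
     * @param[in] result    Tensor to compare against the reference
     */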
    void validate_result(CLTensor &reference, CLTensor &result)
    {
        reference.map();
        result.map();
        Window window;
        window.use_tensor_dimensions(reference.info()->tensor_shape());
        Iterator it_ref(&reference, window);
        Iterator it_res(&result, window);
        execute_window_loop(window, [&](const Coordinates &)
        {
            assert(*reinterpret_cast<unsigned char *>(it_ref.ptr()) == *reinterpret_cast<unsigned char *>(it_res.ptr()));
        },
        it_ref, it_res);
        reference.unmap();
        result.unmap();
    }

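    /** Map a tensor and fill it with an incrementing byte pattern (0, 1, 2, ...)
     *
     * @param[in,out] tensor Tensor to fill
     */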
    void fill_tensor(CLTensor &tensor)
    {
        tensor.map();
        Window window;
        window.use_tensor_dimensions(tensor.info()->tensor_shape());
        Iterator it_tensor(&tensor, window);
        unsigned char val(0);
        execute_window_loop(window, [&](const Coordinates &)
        {
            *reinterpret_cast<unsigned char *>(it_tensor.ptr()) = val++;
        },
        it_tensor);
        tensor.unmap();
    }
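    /** Initialise a tensor's metadata with the given shape, data type and layout (no memory is allocated here)
     *
     * @param[in]  shape  Shape of the tensor
     * @param[out] tensor Tensor to initialise
     * @param[in]  type   Data type of the tensor elements
     * @param[in]  layout Data layout (NCHW or NHWC)
     */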
    void init_tensor(const TensorShape shape, CLTensor &tensor, DataType type, DataLayout layout)
    {
        tensor.allocator()->init(TensorInfo(shape, 1, type).set_data_layout(layout));
    }

    CLTensor tensor_nchw{};
    CLTensor tensor_nhwc{};
    CLTensor tensor_nchw_result{};
    CLPermute permute_nhwc{};
    CLPermute permute_nchw{};
};

/** Main program creating an example that demonstrates how to load precompiled kernels from a file.
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 */
int main(int argc, char **argv)
{
    return utils::run_example<CLCacheExample>(argc, argv);
}
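
// Typical usage (the binary name below is an assumption, not taken from this file):
//   ./cl_cache                   First run: compiles the OpenCL kernels and writes cache.bin.
//   ./cl_cache --restore_cache   Later runs: reload the precompiled kernels from cache.bin,
//                                so the reported configuration time should drop noticeably.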