/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "CL/CLAccessor.h"
#include "CL/Helper.h"
#include "Globals.h"
#include "TensorLibrary.h"
#include "benchmark/Datasets.h"
#include "benchmark/Profiler.h"
#include "benchmark/WallClockTimer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

#include "benchmark/benchmark_api.h"
using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::benchmark;
using namespace arm_compute::test::cl;

// NOTE(review): included after the using directives — PoolingLayer.h appears
// to rely on names from these namespaces being visible; confirm before reordering.
#include "benchmark/common/PoolingLayer.h"
48namespace
49{
50using PoolingLayerAlexNet = PoolingLayer<AlexNetPoolingLayerDataset, CLTensor, CLAccessor, CLPoolingLayer>;
51using PoolingLayerLeNet5 = PoolingLayer<LeNet5PoolingLayerDataset, CLTensor, CLAccessor, CLPoolingLayer>;
52using PoolingLayerGoogLeNet = PoolingLayer<GoogLeNetPoolingLayerDataset, CLTensor, CLAccessor, CLPoolingLayer>;
53} // namespace
54
55BENCHMARK_DEFINE_F(PoolingLayerAlexNet, cl_alexnet)
56(::benchmark::State &state)
57{
58 while(state.KeepRunning())
59 {
60 // Run function
61 profiler.start();
62 pool_layer.run();
63 CLScheduler::get().sync();
64 profiler.stop();
65 }
66}
67
68BENCHMARK_REGISTER_F(PoolingLayerAlexNet, cl_alexnet)
69->Threads(1)
70->Apply(DataSetArgBatched<AlexNetPoolingLayerDataset, 0, 1, 4, 8>);
71BENCHMARK_REGISTER_F(PoolingLayerAlexNet, cl_alexnet)
72->Threads(1)
73->Apply(DataSetArgBatched<AlexNetPoolingLayerDataset, 1, 1, 4, 8>);
74BENCHMARK_REGISTER_F(PoolingLayerAlexNet, cl_alexnet)
75->Threads(1)
76->Apply(DataSetArgBatched<AlexNetPoolingLayerDataset, 2, 1, 4, 8>);
77
78BENCHMARK_DEFINE_F(PoolingLayerLeNet5, cl_lenet5)
79(::benchmark::State &state)
80{
81 while(state.KeepRunning())
82 {
83 // Run function
84 profiler.start();
85 pool_layer.run();
86 CLScheduler::get().sync();
87 profiler.stop();
88 }
89}
90
91BENCHMARK_REGISTER_F(PoolingLayerLeNet5, cl_lenet5)
92->Threads(1)
93->Apply(DataSetArgBatched<LeNet5PoolingLayerDataset, 0, 1, 4, 8>);
94BENCHMARK_REGISTER_F(PoolingLayerLeNet5, cl_lenet5)
95->Threads(1)
96->Apply(DataSetArgBatched<LeNet5PoolingLayerDataset, 1, 1, 4, 8>);
97
98BENCHMARK_DEFINE_F(PoolingLayerGoogLeNet, cl_googlenet)
99(::benchmark::State &state)
100{
101 while(state.KeepRunning())
102 {
103 // Run function
104 profiler.start();
105 pool_layer.run();
106 CLScheduler::get().sync();
107 profiler.stop();
108 }
109}
110
111// FIXME: Add support for 7x7 pooling layer pool5/7x7_s1
112BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
113->Threads(1)
114->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 0, 1, 4, 8>);
115BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
116->Threads(1)
117->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 1, 1, 4, 8>);
118BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
119->Threads(1)
120->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 2, 1, 4, 8>);
121BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
122->Threads(1)
123->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 3, 1, 4, 8>);
124BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
125->Threads(1)
126->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 4, 1, 4, 8>);
127BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
128->Threads(1)
129->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 5, 1, 4, 8>);
130BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
131->Threads(1)
132->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 6, 1, 4, 8>);
133BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
134->Threads(1)
135->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 7, 1, 4, 8>);
136BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
137->Threads(1)
138->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 8, 1, 4, 8>);
139BENCHMARK_REGISTER_F(PoolingLayerGoogLeNet, cl_googlenet)
140->Threads(1)
141->Apply(DataSetArgBatched<GoogLeNetPoolingLayerDataset, 9, 1, 4, 8>);