//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DelegateOptionsTestHelper.hpp"

namespace armnnDelegate
{

TEST_SUITE("DelegateOptions")
{

TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable ReduceFp32ToFp16
        armnn::OptimizerOptions optimizerOptions(true, true, false, false);
        armnn::INetworkProperties networkProperties;
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions, networkProperties);

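        // The test helper is assumed to build an Add followed by a Div, so the data above
        // is chosen such that (inputData + inputData) / divData == expectedResult.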
        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // ReduceFp32ToFp16 is enabled, so the captured output should mention the inserted
    // FP32->FP16 and FP16->FP32 conversion layers.
    CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
    CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable Debug
        armnn::OptimizerOptions optimizerOptions(false, true, false, false);
        armnn::INetworkProperties networkProperties;
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions, networkProperties);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // With Debug enabled, the optimizer inserts debug layers that print each layer's
    // guid, name, output slot, shape and data to std::cout.
    CHECK(ss.str().find("layerGuid") != std::string::npos);
    CHECK(ss.str().find("layerName") != std::string::npos);
    CHECK(ss.str().find("outputSlot") != std::string::npos);
    CHECK(ss.str().find("shape") != std::string::npos);
    CHECK(ss.str().find("data") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Enable debug with debug callback function
    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
    bool callback = false;
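    // Mock debug callback: it ignores the layer guid, output slot index and tensor handle
    // it is given and simply records that it was invoked.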
    auto mockCallback = [&](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
    {
        armnn::IgnoreUnused(guid);
        armnn::IgnoreUnused(slotIndex);
        armnn::IgnoreUnused(tensor);
        callback = true;
    };

    armnn::INetworkProperties networkProperties;
    armnnDelegate::DelegateOptions delegateOptions(backends,
                                                   optimizerOptions,
                                                   networkProperties,
                                                   armnn::EmptyOptional(),
                                                   armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));

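    // The callback must not have been invoked before the model is executed.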
    CHECK(!callback);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);

    // Check that the debug callback function was called.
    CHECK(callback);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToBf16")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        ReduceFp32ToBf16TestImpl();
    }

    // ReduceFp32ToBf16 option is enabled
    CHECK(ss.str().find("convert_fp32_to_bf16") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
    std::vector<uint8_t> divData = { 2, 2, 3, 4 };
    std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };

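    // Enable import: the last OptimizerOptions flag and the INetworkProperties arguments below
    // are assumed to control importing/exporting of the input and output buffers.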
    armnn::OptimizerOptions optimizerOptions(false, false, false, true);
    armnn::INetworkProperties networkProperties(true, true);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions, networkProperties);

    DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
                                backends,
                                tensorShape,
                                inputData,
                                inputData,
                                divData,
                                expectedResult,
                                delegateOptions);
}

}

} // namespace armnnDelegate