blob: f9b0dbd9594ac545daab7b3f13287d911c936512 [file] [log] [blame]
/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
25#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
29#include "tests/AssetsLibrary.h"
30#include "tests/Globals.h"
31#include "tests/IAccessor.h"
32#include "tests/framework/Asserts.h"
33#include "tests/framework/Fixture.h"
34#include "tests/validation/CPP/GEMMLowp.h"
35#include "tests/validation/Helpers.h"
36
37#include <random>
38
39namespace arm_compute
40{
41namespace test
42{
43namespace validation
44{
45template <typename TensorType, typename AccessorType, typename FunctionType>
Gian Marcoe75a02b2017-11-08 12:24:09 +000046class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
Pablo Tello299025a2017-09-29 11:30:12 +010047{
48public:
49 template <typename...>
Gian Marcoe75a02b2017-11-08 12:24:09 +000050 void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
Pablo Tello299025a2017-09-29 11:30:12 +010051 {
Gian Marcoe75a02b2017-11-08 12:24:09 +000052 _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset);
53 _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset);
Pablo Tello299025a2017-09-29 11:30:12 +010054 }
55
56protected:
57 template <typename U>
58 void fill(U &&tensor, int i)
59 {
Gian Marcoe75a02b2017-11-08 12:24:09 +000060 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
61 std::uniform_int_distribution<> distribution(1, 254);
Pablo Tello299025a2017-09-29 11:30:12 +010062 library->fill(tensor, distribution, i);
63 }
64
65 TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
Gian Marcoe75a02b2017-11-08 12:24:09 +000066 int32_t a_offset, int32_t b_offset)
Pablo Tello299025a2017-09-29 11:30:12 +010067 {
68 // Create tensors
Gian Marcoe75a02b2017-11-08 12:24:09 +000069 TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
70 TensorType b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
Pablo Tello6ff12a02017-11-02 16:09:35 +000071 TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
Pablo Tellobf2fb952017-09-29 16:43:25 +010072
Gian Marcoe75a02b2017-11-08 12:24:09 +000073 a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
74 b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
75
Pablo Tellobf2fb952017-09-29 16:43:25 +010076 // Create and configure function
77 FunctionType gemmlowp;
78 gemmlowp.configure(&a, &b, &c);
79
80 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
81 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
82 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
83
84 // Allocate tensors
85 a.allocator()->allocate();
86 b.allocator()->allocate();
87 c.allocator()->allocate();
88
89 ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
90 ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
91 ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
92
93 // Fill tensors
Gian Marcoe75a02b2017-11-08 12:24:09 +000094 fill(AccessorType(a), 0);
95 fill(AccessorType(b), 1);
Pablo Tellobf2fb952017-09-29 16:43:25 +010096
97 // Compute GEMM function
98 gemmlowp.run();
99 return c;
100 }
101
Gian Marcoe75a02b2017-11-08 12:24:09 +0000102 SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
103 int32_t a_offset, int32_t b_offset)
Pablo Tellobf2fb952017-09-29 16:43:25 +0100104 {
105 // Create reference
Gian Marcoe75a02b2017-11-08 12:24:09 +0000106 SimpleTensor<uint8_t> a{ shape_a, DataType::QASYMM8, 1 };
107 SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };
Pablo Tellobf2fb952017-09-29 16:43:25 +0100108
109 // Fill reference
Gian Marcoe75a02b2017-11-08 12:24:09 +0000110 fill(a, 0);
111 fill(b, 1);
Pablo Tellobf2fb952017-09-29 16:43:25 +0100112
Gian Marcoe75a02b2017-11-08 12:24:09 +0000113 return reference::gemmlowp_matrix_multiply_core<uint8_t>(a, b, a_offset, b_offset);
Pablo Tellobf2fb952017-09-29 16:43:25 +0100114 }
115
Pablo Tello6ff12a02017-11-02 16:09:35 +0000116 TensorType _target{};
117 SimpleTensor<int32_t> _reference{};
Pablo Tellobf2fb952017-09-29 16:43:25 +0100118};
119
Gian Marcoe75a02b2017-11-08 12:24:09 +0000120template <typename TensorType, typename AccessorType, typename FunctionType>
121class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
122{
123public:
124 template <typename...>
125 void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
126 {
127 _target = compute_target(shape, result_offset, result_mult_int, result_shift);
128 _reference = compute_reference(shape, result_offset, result_mult_int, result_shift);
129 }
130
131protected:
132 template <typename U>
133 void fill(U &&tensor, int i)
134 {
135 std::uniform_int_distribution<> distribution(-6000, 6000);
136 library->fill(tensor, distribution, i);
137 }
138
139 TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
140 {
141 // Create tensors
142 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
143 TensorType b = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);
144
145 // Create and configure function
146 FunctionType output_stage;
147 output_stage.configure(&a, &b, result_offset, result_mult_int, result_shift);
148
149 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
150 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
151
152 // Allocate tensors
153 a.allocator()->allocate();
154 b.allocator()->allocate();
155
156 ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
157 ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
158
159 // Fill tensors
160 fill(AccessorType(a), 0);
161
162 // Compute GEMM function
163 output_stage.run();
164 return b;
165 }
166
167 SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
168 {
169 // Create reference
170 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
171
172 // Fill reference
173 fill(a, 0);
174
175 return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, result_offset, result_mult_int, result_shift);
176 }
177
178 TensorType _target{};
179 SimpleTensor<uint8_t> _reference{};
180};
Pablo Tello299025a2017-09-29 11:30:12 +0100181} // namespace validation
182} // namespace test
183} // namespace arm_compute
184#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */