/*
 * Copyright (c) 2020-2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_QLSTM_LAYER_NORMALIZATION_FIXTURE
#define ARM_COMPUTE_TEST_QLSTM_LAYER_NORMALIZATION_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/QLSTMLayerNormalization.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
/** Validation fixture for QLSTM layer normalization: runs a target function or kernel and compares it against the reference implementation. */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class QLSTMLayerNormalizationValidationFixture : public framework::Fixture
{
public:
    void setup(TensorShape input_shape, TensorShape weight_shape, TensorShape bias_shape, DataType data_type, QuantizationInfo weight_qinfo)
    {
        ARM_COMPUTE_ERROR_ON(data_type != DataType::QSYMM16);

        _data_type = data_type;
        _qinfo     = weight_qinfo;

        _target    = compute_target(input_shape, weight_shape, bias_shape);
        _reference = compute_reference(input_shape, weight_shape, bias_shape);
    }

protected:
    template <typename InputType, typename BiasType>
    void fill(InputType &&input_tensor, InputType &&weight_tensor, BiasType &&bias_tensor)
    {
        switch(_data_type)
        {
            case DataType::QSYMM16:
            {
                // Value ranges are based on the reference implementation's test case.
                constexpr int16_t input_min  = -1000;
                constexpr int16_t input_max  = 1000;
                constexpr int16_t weight_min = 19000;
                constexpr int16_t weight_max = 27000;
                constexpr int32_t bias_min   = -16000000;
                constexpr int32_t bias_max   = -13000000;

                std::uniform_int_distribution<> input_distribution(input_min, input_max);
                std::uniform_int_distribution<> weight_distribution(weight_min, weight_max);
                std::uniform_int_distribution<> bias_distribution(bias_min, bias_max);

                library->fill(input_tensor, input_distribution, 0);
                library->fill(weight_tensor, weight_distribution, 0);
                library->fill(bias_tensor, bias_distribution, 0);
                break;
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
                break;
        }
    }

    void allocate_tensors(const std::vector<TensorType *> &tensors)
    {
        for(auto t : tensors)
        {
            ARM_COMPUTE_ASSERT(t->info()->is_resizable());
            t->allocator()->allocate();
            ARM_COMPUTE_ASSERT(!t->info()->is_resizable());
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weight_shape, const TensorShape &bias_shape)
    {
        // Create tensors
        TensorType input  = create_tensor<TensorType>(input_shape, _data_type, 1);
        TensorType weight = create_tensor<TensorType>(weight_shape, _data_type, 1, _qinfo);
        TensorType bias   = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
        TensorType output = create_tensor<TensorType>(input_shape, _data_type, 1);

        // Configure function, allocate and fill tensors, then run
        FunctionType fn;
        fn.configure(&input, &output, &weight, &bias);
        allocate_tensors({ &input, &weight, &bias, &output });
        fill(AccessorType(input), AccessorType(weight), AccessorType(bias));
        fn.run();

        return output;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weight_shape, const TensorShape &bias_shape)
    {
        // Create reference
        SimpleTensor<T>       input{ input_shape, _data_type, 1 };
        SimpleTensor<T>       weight{ weight_shape, _data_type, 1, _qinfo };
        SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };

        // Fill reference
        fill(input, weight, bias);

        return reference::qlstm_layer_normalization(input, weight, bias);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    DataType         _data_type{};
    QuantizationInfo _qinfo{};
};
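
// Usage sketch (illustrative only, not part of this header): a backend test file would typically alias
// the fixture with concrete tensor, accessor and function types and drive it through the test framework.
// The kernel name and the dataset placeholder below are assumptions made for this example.
//
//   template <typename T>
//   using NEQLSTMLayerNormalizationFixture =
//       QLSTMLayerNormalizationValidationFixture<Tensor, Accessor, NEQLSTMLayerNormalizationKernel, T>;
//
//   FIXTURE_DATA_TEST_CASE(RandomValue, NEQLSTMLayerNormalizationFixture<int16_t>, framework::DatasetMode::ALL,
//                          /* dataset providing input/weight/bias shapes, DataType::QSYMM16 and a weight QuantizationInfo */)
//   {
//       validate(Accessor(_target), _reference);
//   }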
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* ARM_COMPUTE_TEST_QLSTM_LAYER_NORMALIZATION_FIXTURE */