/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/WidthConcatenateLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
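/** Fixture for validating width-wise concatenation of multiple input tensors against
 *  the reference implementation, using randomly generated input shapes and
 *  quantization info.
 *
 *  @tparam TensorType   Backend tensor type (e.g. Tensor or CLTensor).
 *  @tparam ITensorType  Tensor interface type accepted by the function's configure() method.
 *  @tparam AccessorType Accessor type used to fill the backend tensors.
 *  @tparam FunctionType Width-concatenate function under test.
 *  @tparam T            Element type of the tensors.
 *
 *  A minimal usage sketch, assuming the NEON backend types (illustrative only; this
 *  file does not prescribe a particular backend):
 *
 *    using NEWidthConcatenateFixture =
 *        WidthConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEWidthConcatenateLayer, float>;
 */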
template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
class WidthConcatenateLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType data_type)
    {
        // Create input shapes
        std::mt19937                    gen(library->seed());
        std::uniform_int_distribution<> num_dis(2, 8);
        std::uniform_int_distribution<> offset_dis(0, 20);

        const int num_tensors = num_dis(gen);

        std::vector<TensorShape> shapes(num_tensors, shape);

        // Vector holding the quantization info:
        // the last element is the output quantization info,
        // all other elements are the quantization info for the input tensors.
        std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());
        for(auto &qi : qinfo)
        {
            qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
        }
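
        // Distributions used to randomly shrink the width of some inputs, so that the
        // tensors being concatenated do not all have the same width.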
        std::bernoulli_distribution      mutate_dis(0.5f);
        std::uniform_real_distribution<> change_dis(-0.25f, 0.f);

        // Generate more shapes based on the input
        for(auto &s : shapes)
        {
            // Randomly change the first dimension
            if(mutate_dis(gen))
            {
                // Decrease the dimension by a small percentage. Don't increase it,
                // as that could make the tensor too large.
                s.set(0, s[0] + 2 * static_cast<int>(s[0] * change_dis(gen)));
            }
        }

        _target    = compute_target(shapes, qinfo, data_type);
        _reference = compute_reference(shapes, qinfo, data_type);
    }

protected:
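    /** Fill @p tensor with uniformly distributed values, using @p i as the seed offset
     *  so that each input tensor receives different data.
     */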
    template <typename U>
    void fill(U &&tensor, int i)
    {
        library->fill_tensor_uniform(tensor, i);
    }

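    /** Create the backend tensors, configure and run the width-concatenate function
     *  under test, and return the resulting output tensor.
     */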
    TensorType compute_target(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
    {
        std::vector<TensorType>    srcs;
        std::vector<ITensorType *> src_ptrs;

        // Create tensors
        srcs.reserve(shapes.size());

        for(size_t j = 0; j < shapes.size(); ++j)
        {
            srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
            src_ptrs.emplace_back(&srcs.back());
        }

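        // The output shape is the input shape with dimension 0 (the width) replaced by
        // the sum of the widths of all inputs.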
        TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs);

        TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);

        // Create and configure function
        FunctionType width_concat;
        width_concat.configure(src_ptrs, &dst);

        for(auto &src : srcs)
        {
            ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        }

        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        for(auto &src : srcs)
        {
            src.allocator()->allocate();
            ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        }

        dst.allocator()->allocate();
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        int i = 0;
        for(auto &src : srcs)
        {
            fill(AccessorType(src), i++);
        }

        // Compute function
        width_concat.run();

        return dst;
    }

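    /** Compute the expected output by filling SimpleTensor inputs with the same data as
     *  the backend tensors and running the reference implementation.
     */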
    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
    {
        std::vector<SimpleTensor<T>> srcs;

        // Create and fill tensors
        for(size_t j = 0; j < shapes.size(); ++j)
        {
            srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
            fill(srcs.back(), j);
        }

        const TensorShape dst_shape = calculate_width_concatenate_shape(shapes);
        SimpleTensor<T>   dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };

        return reference::widthconcatenate_layer<T>(srcs, dst);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE */