/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/DepthConcatenateLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
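/** Fixture for validating the depth concatenate layer.
 *
 * Generates a random number of input tensors from the given shape (with randomised
 * depth and slightly shrunk spatial dimensions), concatenates them along the depth
 * axis with both the function under test and the reference implementation, and
 * stores the two results in _target and _reference for comparison by the test case.
 */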
template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
class DepthConcatenateLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType data_type)
    {
        // Create input shapes
        std::mt19937                    gen(library->seed());
        std::uniform_int_distribution<> num_dis(2, 4);
        std::uniform_int_distribution<> offset_dis(0, 20);

        const int num_tensors = num_dis(gen);

        std::vector<TensorShape> shapes(num_tensors, shape);

        // vector holding the quantization info:
        // the last element is the output quantization info
        // all other elements are the quantization info for the input tensors
        std::vector<QuantizationInfo> qinfo(num_tensors + 1, QuantizationInfo());

        for(auto &qi : qinfo)
        {
            qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
        }

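        // Distributions used to randomise the tensor depth and to shrink the
        // spatial dimensions of each generated input shape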
        std::uniform_int_distribution<>  depth_dis(1, 3);
        std::bernoulli_distribution      mutate_dis(0.5f);
        std::uniform_real_distribution<> change_dis(-0.25f, 0.f);

        // Generate more shapes based on the input
        for(auto &s : shapes)
        {
            // Set the depth of the tensor
            s.set(2, depth_dis(gen));

            // Randomly change the first dimension
            if(mutate_dis(gen))
            {
                // Decrease the dimension by a small percentage. Don't increase
                // it, as that could make the tensor too large. The change must also
                // be an even number, otherwise our depth concatenate fails.
                s.set(0, s[0] + 2 * static_cast<int>(s[0] * change_dis(gen)));
            }

            // Repeat the same as above for the second dimension
            if(mutate_dis(gen))
            {
                s.set(1, s[1] + 2 * static_cast<int>(s[1] * change_dis(gen)));
            }
        }

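        // Compute the output of the function under test and of the reference implementation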
        _target    = compute_target(shapes, qinfo, data_type);
        _reference = compute_reference(shapes, qinfo, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        library->fill_tensor_uniform(tensor, i);
    }

    TensorType compute_target(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
    {
        std::vector<TensorType>    srcs;
        std::vector<ITensorType *> src_ptrs;

        // Create tensors
        srcs.reserve(shapes.size());

        for(size_t j = 0; j < shapes.size(); ++j)
        {
            srcs.emplace_back(create_tensor<TensorType>(shapes[j], data_type, 1, qinfo[j]));
            src_ptrs.emplace_back(&srcs.back());
        }

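        // Compute the expected output shape from the input tensor infos and create the output tensor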
        TensorShape dst_shape = misc::shape_calculator::calculate_depth_concatenate_shape(src_ptrs);
        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1, qinfo[shapes.size()]);

        // Create and configure function
        FunctionType depth_concat;
        depth_concat.configure(src_ptrs, &dst);

        for(auto &src : srcs)
        {
            ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        }

        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        for(auto &src : srcs)
        {
            src.allocator()->allocate();
            ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        }

        dst.allocator()->allocate();
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        int i = 0;
        for(auto &src : srcs)
        {
            fill(AccessorType(src), i++);
        }

        // Compute function
        depth_concat.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type)
    {
        std::vector<SimpleTensor<T>> srcs;

        // Create and fill tensors
        for(size_t j = 0; j < shapes.size(); ++j)
        {
            srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]);
            fill(srcs.back(), j);
        }

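        // Compute the expected output shape, create the output reference tensor and run the reference implementation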
        const TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
        SimpleTensor<T>   dst{ dst_shape, data_type, 1, qinfo[shapes.size()] };

        return reference::depthconcatenate_layer<T>(srcs, dst);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
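// Usage sketch (not part of this header): a backend test typically instantiates the
// fixture through a type alias and a FIXTURE_DATA_TEST_CASE, along the lines of the
// assumed, backend-specific example below.
//
//   template <typename T>
//   using NEDepthConcatenateLayerFixture =
//       DepthConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEDepthConcatenateLayer, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
//                          framework::dataset::combine(datasets::Small2DShapes(), framework::dataset::make("DataType", DataType::F32)))
//   {
//       // Validate output against the reference
//       validate(Accessor(_target), _reference);
//   }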
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE */