/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_SPACE_TO_BATCH_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_SPACE_TO_BATCH_LAYER_FIXTURE

#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/SpaceToBatch.h"

#include <random> // std::uniform_real_distribution / std::uniform_int_distribution

namespace arm_compute
{
namespace test
{
namespace validation
{
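/** Fixture for SpaceToBatchLayer validation tests.
 *
 * Runs the layer through the backend-specific @p FunctionType and computes the
 * expected result with the portable reference implementation so the test case
 * can compare the two. A minimal usage sketch, with illustrative names:
 *
 *     template <typename T>
 *     using CLSpaceToBatchLayerFixture = SpaceToBatchLayerValidationFixture<CLTensor, CLAccessor, CLSpaceToBatchLayer, T>;
 *     FIXTURE_DATA_TEST_CASE(RunSmall, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, dataset)
 */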
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class SpaceToBatchLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
    {
        _target    = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout);
        _reference = compute_reference(input_shape, block_shape_shape, paddings_shape, output_shape, data_type);
    }

protected:
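    /** Fill @p tensor with uniformly distributed random values in [-1, 1). */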
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
        library->fill(tensor, distribution, i);
    }
    /** Fill @p tensor with zeros: the [0, 0] distribution means this fixture exercises the unpadded case only. */
    template <typename U>
    void fill_pad(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(0, 0);
        library->fill(tensor, distribution, i);
    }
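    /** Run SpaceToBatchLayer through the backend-specific @p FunctionType and return its output.
     *
     * Dataset shapes are given in NCHW order and are permuted to NHWC when
     * that layout is requested.
     */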
    TensorType compute_target(TensorShape input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape, TensorShape output_shape,
                              DataType data_type, DataLayout data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType input       = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
        TensorType paddings    = create_tensor<TensorType>(paddings_shape, DataType::S32);
        TensorType output      = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType space_to_batch;
        space_to_batch.configure(&input, &block_shape, &paddings, &output);

        ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(paddings.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        input.allocator()->allocate();
        block_shape.allocator()->allocate();
        paddings.allocator()->allocate();
        output.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!paddings.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(input), 0);
        fill_pad(AccessorType(paddings), 0);
        {
            // Derive each block-shape entry from the ratio of input to output spatial
            // dimensions, e.g. a 4x4 input reduced to a 2x2 output gives a 2x2 block
            auto      block_shape_data = AccessorType(block_shape);
            const int idx_width        = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
            for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
            {
                static_cast<int32_t *>(block_shape_data.data())[i] = input_shape[i + idx_width] / output_shape[i + idx_width];
            }
        }
        // Compute function
        space_to_batch.run();

        return output;
    }

    /** Compute the expected output on the host with the reference implementation. */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape,
                                      const TensorShape &output_shape, DataType data_type)
    {
        // Create reference
        SimpleTensor<T>       input{ input_shape, data_type };
        SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 };
        SimpleTensor<int32_t> paddings{ paddings_shape, DataType::S32 };

        // Fill reference
        fill(input, 0);
        fill_pad(paddings, 0);
        // Reference tensors are always NCHW, so the spatial dimensions start at index 0
        for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
        {
            block_shape[i] = input_shape[i] / output_shape[i];
        }

        // Compute reference
        return reference::space_to_batch(input, block_shape, paddings, output_shape);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_SPACE_TO_BATCH_LAYER_FIXTURE */