/*
 * Copyright (c) 2018 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE

#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/BatchToSpaceLayer.h"

#include <cstdint>
#include <random>
31
32namespace arm_compute
33{
34namespace test
35{
36namespace validation
37{
38template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
39class BatchToSpaceLayerValidationFixture : public framework::Fixture
40{
41public:
42 template <typename...>
Michalis Spyrouf1addb62018-09-11 11:16:47 +010043 void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010044 {
Michalis Spyrouf1addb62018-09-11 11:16:47 +010045 _target = compute_target(input_shape, block_shape_shape, output_shape, data_type, data_layout);
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010046 _reference = compute_reference(input_shape, block_shape_shape, output_shape, data_type);
47 }
48
49protected:
50 template <typename U>
51 void fill(U &&tensor, int i)
52 {
53 std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
54 library->fill(tensor, distribution, i);
55 }
Michalis Spyrouf1addb62018-09-11 11:16:47 +010056 TensorType compute_target(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape,
57 DataType data_type, DataLayout data_layout)
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010058 {
Michalis Spyrouf1addb62018-09-11 11:16:47 +010059 if(data_layout == DataLayout::NHWC)
60 {
61 permute(input_shape, PermutationVector(2U, 0U, 1U));
62 permute(output_shape, PermutationVector(2U, 0U, 1U));
63 }
64
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010065 // Create tensors
Michalis Spyrouf1addb62018-09-11 11:16:47 +010066 TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010067 TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
Michalis Spyrouf1addb62018-09-11 11:16:47 +010068 TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010069
70 // Create and configure function
71 FunctionType batch_to_space;
72 batch_to_space.configure(&input, &block_shape, &output);
73
74 ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
75 ARM_COMPUTE_EXPECT(block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
76 ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
77
78 // Allocate tensors
79 input.allocator()->allocate();
80 block_shape.allocator()->allocate();
81 output.allocator()->allocate();
82
83 ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
84 ARM_COMPUTE_EXPECT(!block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
85 ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
86
87 // Fill tensors
88 fill(AccessorType(input), 0);
89 {
90 auto block_shape_data = AccessorType(block_shape);
Michalis Spyrouf1addb62018-09-11 11:16:47 +010091 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010092 for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
93 {
Michalis Spyrouf1addb62018-09-11 11:16:47 +010094 static_cast<int32_t *>(block_shape_data.data())[i] = output_shape[i + idx_width] / input_shape[i + idx_width];
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +010095 }
96 }
97 // Compute function
98 batch_to_space.run();
99
100 return output;
101 }
102
103 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape,
104 const TensorShape &output_shape, DataType data_type)
105 {
106 // Create reference
107 SimpleTensor<T> input{ input_shape, data_type };
108 SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 };
109
110 // Fill reference
111 fill(input, 0);
112 for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
113 {
114 block_shape[i] = output_shape[i] / input_shape[i];
115 }
116
117 // Compute reference
118 return reference::batch_to_space(input, block_shape, output_shape);
119 }
120
121 TensorType _target{};
122 SimpleTensor<T> _reference{};
123};
124} // namespace validation
125} // namespace test
126} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE */