/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
| 24 | #ifndef ARM_COMPUTE_TEST_FUSEBATCHNORMALIZATION_FIXTURE |
| 25 | #define ARM_COMPUTE_TEST_FUSEBATCHNORMALIZATION_FIXTURE |
| 26 | |
| 27 | #include "arm_compute/core/TensorShape.h" |
| 28 | #include "arm_compute/core/Types.h" |
| 29 | #include "arm_compute/runtime/CL/functions/CLFuseBatchNormalization.h" |
| 30 | #include "tests/AssetsLibrary.h" |
| 31 | #include "tests/Globals.h" |
| 32 | #include "tests/IAccessor.h" |
| 33 | #include "tests/framework/Asserts.h" |
| 34 | #include "tests/framework/Fixture.h" |
| 35 | #include "tests/validation/Helpers.h" |
| 36 | #include "tests/validation/reference/FuseBatchNormalization.h" |
| 37 | |
| 38 | #include <tuple> |
| 39 | #include <utility> |
| 40 | |
| 41 | namespace arm_compute |
| 42 | { |
| 43 | namespace test |
| 44 | { |
| 45 | namespace validation |
| 46 | { |
| 47 | template <typename TensorType, typename AccessorType, typename FunctionType, int dims_weights, typename T> |
| 48 | class FuseBatchNormalizationFixture : public framework::Fixture |
| 49 | { |
| 50 | public: |
| 51 | template <typename...> |
| 52 | void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta) |
| 53 | { |
| 54 | std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta); |
| 55 | std::tie(_reference_w, _reference_b) = compute_reference(shape_w, data_type, data_layout, with_bias, with_gamma, with_beta); |
| 56 | } |
| 57 | |
| 58 | protected: |
| 59 | template <typename U> |
| 60 | void fill(U &&tensor, int i, float min, float max) |
| 61 | { |
| 62 | library->fill_tensor_uniform(tensor, i, min, max); |
| 63 | } |
| 64 | |
| 65 | std::pair<TensorType, TensorType> compute_target(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta) |
| 66 | { |
| 67 | const TensorShape shape_v(shape_w[dims_weights - 1]); |
| 68 | |
| 69 | if(data_layout == DataLayout::NHWC) |
| 70 | { |
| 71 | permute(shape_w, PermutationVector(2U, 0U, 1U)); |
| 72 | } |
| 73 | |
| 74 | const bool in_place_w = in_place; |
| 75 | const bool in_place_b = with_bias ? in_place : false; |
| 76 | |
| 77 | // Create tensors |
| 78 | TensorType w = create_tensor<TensorType>(shape_w, data_type, 1, QuantizationInfo(), data_layout); |
| 79 | TensorType b = create_tensor<TensorType>(shape_v, data_type); |
| 80 | TensorType mean = create_tensor<TensorType>(shape_v, data_type); |
| 81 | TensorType var = create_tensor<TensorType>(shape_v, data_type); |
| 82 | TensorType w_fused = create_tensor<TensorType>(shape_w, data_type, 1, QuantizationInfo(), data_layout); |
| 83 | TensorType b_fused = create_tensor<TensorType>(shape_v, data_type); |
| 84 | TensorType beta = create_tensor<TensorType>(shape_v, data_type); |
| 85 | TensorType gamma = create_tensor<TensorType>(shape_v, data_type); |
| 86 | |
| 87 | auto b_to_use = with_bias ? &b : nullptr; |
| 88 | auto gamma_to_use = with_gamma ? &gamma : nullptr; |
| 89 | auto beta_to_use = with_beta ? &beta : nullptr; |
| 90 | auto w_fused_to_use = in_place_w ? nullptr : &w_fused; |
| 91 | auto b_fused_to_use = in_place_b ? nullptr : &b_fused; |
| 92 | |
| 93 | // Create and configure function |
| 94 | FunctionType fuse_batch_normalization; |
| 95 | fuse_batch_normalization.configure(&w, &mean, &var, w_fused_to_use, b_fused_to_use, b_to_use, beta_to_use, gamma_to_use, _epsilon); |
| 96 | |
| 97 | ARM_COMPUTE_EXPECT(w.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 98 | ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 99 | ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 100 | ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 101 | ARM_COMPUTE_EXPECT(w_fused.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 102 | ARM_COMPUTE_EXPECT(b_fused.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 103 | ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 104 | ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 105 | |
| 106 | // Allocate tensors |
| 107 | w.allocator()->allocate(); |
| 108 | b.allocator()->allocate(); |
| 109 | mean.allocator()->allocate(); |
| 110 | var.allocator()->allocate(); |
| 111 | w_fused.allocator()->allocate(); |
| 112 | b_fused.allocator()->allocate(); |
| 113 | beta.allocator()->allocate(); |
| 114 | gamma.allocator()->allocate(); |
| 115 | |
| 116 | ARM_COMPUTE_EXPECT(!w.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 117 | ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 118 | ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 119 | ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 120 | ARM_COMPUTE_EXPECT(!w_fused.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 121 | ARM_COMPUTE_EXPECT(!b_fused.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 122 | ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 123 | ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS); |
| 124 | |
| 125 | // Fill tensors |
| 126 | fill(AccessorType(w), 0U, -1.0f, 1.0f); |
| 127 | fill(AccessorType(b), 1U, -1.0f, 1.0f); |
| 128 | fill(AccessorType(mean), 2U, -1.0f, 1.0f); |
| 129 | fill(AccessorType(var), 3U, 0.0f, 1.0f); |
| 130 | fill(AccessorType(beta), 4U, -1.0f, 1.0f); |
| 131 | fill(AccessorType(gamma), 5U, -1.0f, 1.0f); |
| 132 | |
| 133 | // Compute function |
| 134 | fuse_batch_normalization.run(); |
| 135 | |
| 136 | return std::make_pair(std::move(in_place_w ? w : w_fused), std::move(in_place_b ? b : b_fused)); |
| 137 | } |
| 138 | |
| 139 | std::pair<SimpleTensor<T>, SimpleTensor<T>> compute_reference(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool with_bias, bool with_gamma, bool with_beta) |
| 140 | { |
| 141 | const TensorShape shape_v(shape_w[dims_weights - 1]); |
| 142 | |
| 143 | SimpleTensor<T> w{ shape_w, data_type }; |
| 144 | SimpleTensor<T> b{ shape_v, data_type }; |
| 145 | SimpleTensor<T> mean{ shape_v, data_type }; |
| 146 | SimpleTensor<T> var{ shape_v, data_type }; |
| 147 | SimpleTensor<T> w_fused{ shape_w, data_type }; |
| 148 | SimpleTensor<T> b_fused{ shape_v, data_type }; |
| 149 | SimpleTensor<T> beta{ shape_v, data_type }; |
| 150 | SimpleTensor<T> gamma{ shape_v, data_type }; |
| 151 | |
| 152 | // Fill reference tensor |
| 153 | fill(w, 0U, -1.0f, 1.0f); |
| 154 | fill(b, 1U, -1.0f, 1.0f); |
| 155 | fill(mean, 2U, -1.0f, 1.0f); |
| 156 | fill(var, 3U, 0.0f, 1.0f); |
| 157 | fill(beta, 4U, -1.0f, 1.0f); |
| 158 | fill(gamma, 5U, -1.0f, 1.0f); |
| 159 | |
| 160 | if(!with_bias) |
| 161 | { |
| 162 | // Fill with zeros |
| 163 | fill(b, 0U, 0.0f, 0.0f); |
| 164 | } |
| 165 | |
| 166 | if(!with_gamma) |
| 167 | { |
| 168 | // Fill with ones |
| 169 | fill(gamma, 0U, 1.0f, 1.0f); |
| 170 | } |
| 171 | |
| 172 | if(!with_beta) |
| 173 | { |
| 174 | // Fill with zeros |
| 175 | fill(beta, 0U, 0.0f, 0.0f); |
| 176 | } |
| 177 | |
| 178 | switch(dims_weights) |
| 179 | { |
| 180 | case 3: |
| 181 | // Weights for depth wise convolution layer |
| 182 | reference::fuse_batch_normalization_dwc_layer(w, mean, var, w_fused, b_fused, b, beta, gamma, _epsilon); |
| 183 | break; |
| 184 | case 4: |
| 185 | // Weights for convolution layer |
| 186 | reference::fuse_batch_normalization_conv_layer(w, mean, var, w_fused, b_fused, b, beta, gamma, _epsilon); |
| 187 | break; |
| 188 | default: |
| 189 | ARM_COMPUTE_ERROR("Not supported number of dimensions for the input weights tensor"); |
| 190 | } |
| 191 | |
| 192 | return std::make_pair(std::move(w_fused), std::move(b_fused)); |
| 193 | } |
| 194 | |
| 195 | const float _epsilon{ 0.0001f }; |
| 196 | TensorType _target_w{}; |
| 197 | TensorType _target_b{}; |
| 198 | SimpleTensor<T> _reference_w{}; |
| 199 | SimpleTensor<T> _reference_b{}; |
| 200 | }; |
| 201 | } // namespace validation |
| 202 | } // namespace test |
| 203 | } // namespace arm_compute |
| 204 | #endif /* ARM_COMPUTE_TEST_FUSEBATCHNORMALIZATION_FIXTURE */ |