/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYER_H
#define ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYER_H

#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;
class NEInstanceNormalizationLayerKernel;

/** Basic function to perform an Instance normalization.
 *
 * This function runs the following kernels:
 * -# @ref NEInstanceNormalizationLayerKernel
 */
class NEInstanceNormalizationLayer : public IFunction
{
public:
    /** Constructor */
    NEInstanceNormalizationLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEInstanceNormalizationLayer(const NEInstanceNormalizationLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEInstanceNormalizationLayer &operator=(const NEInstanceNormalizationLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
    NEInstanceNormalizationLayer(NEInstanceNormalizationLayer &&) = delete;
    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
    NEInstanceNormalizationLayer &operator=(NEInstanceNormalizationLayer &&) = delete;
    /** Default destructor */
    ~NEInstanceNormalizationLayer();
    /** Set the input and output tensors.
     *
     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
     *                         Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
     * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in]      epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
     */
    void configure(ITensor *input, ITensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
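
    // Illustrative usage sketch (not part of the library's documented interface). The tensor
    // shape, data type and variable names below are assumptions made purely for the example;
    // configure() is called with the default gamma/beta/epsilon:
    //
    //     Tensor src;
    //     Tensor dst;
    //     src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));
    //     dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));
    //
    //     NEInstanceNormalizationLayer norm;
    //     norm.configure(&src, &dst); // gamma = 1.0f, beta = 0.0f, epsilon = 1e-12f
    //
    //     src.allocator()->allocate();
    //     dst.allocator()->allocate();
    //     // ... fill src with input data ...
    //     norm.run();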

    /** Static function to check if given info will lead to a valid configuration of @ref NEInstanceNormalizationLayer.
     *
     * @param[in] input   Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
     * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
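
    // Illustrative validate() sketch (shapes and data type are assumptions for the example only):
    //
    //     const TensorInfo src_info(TensorShape(16U, 16U, 8U), 1, DataType::F32);
    //     const TensorInfo dst_info(TensorShape(16U, 16U, 8U), 1, DataType::F32);
    //     const Status     status = NEInstanceNormalizationLayer::validate(&src_info, &dst_info);
    //     if(status.error_code() != ErrorCode::OK)
    //     {
    //         // Configuration would be rejected; inspect status.error_description()
    //     }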

    // Inherited methods overridden:
    void run() override;

private:
    MemoryGroup                                         _memory_group;
    std::unique_ptr<NEInstanceNormalizationLayerKernel> _normalization_kernel;
    bool                                                _is_nchw;
    NEPermute                                           _permute_input;
    NEPermute                                           _permute_output;
    Tensor                                              _permuted_input;
    Tensor                                              _permuted_output;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYER_H */