/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ClComponentElementwiseBinary.h"

#include "arm_compute/core/Validate.h"
#include "arm_compute/dynamic_fusion/sketch/OperatorAttributes.h"
#include "src/core/CL/CLValidate.h"
#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.h"

#include <set>

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
namespace
{
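// Element-wise binary operations currently supported by this component; only ADD is enabled at this point.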
std::set<ElementwiseBinaryCommonAttributes::ElementwiseOp> supported_ops
{
    ElementwiseBinaryCommonAttributes::ElementwiseOp::ADD
};
} // namespace

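// A minimal usage sketch for this validation entry point. Note that add_const_tensor() and the
// operation() setter used below are assumptions about the surrounding dynamic fusion API and may
// not match the actual ArgumentPack / ElementwiseBinaryCommonAttributes interfaces:
//
//     ArgumentPack<ITensorInfo> tensors{};
//     tensors.add_const_tensor(TensorType::ACL_SRC_0, &lhs_info);
//     tensors.add_const_tensor(TensorType::ACL_SRC_1, &rhs_info);
//     tensors.add_const_tensor(TensorType::ACL_DST_0, &dst_info);
//
//     ElementwiseBinaryCommonAttributes attributes{};
//     attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::ADD);
//
//     const Status status = ClComponentElementwiseBinary::validate(tensors, attributes);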
Status ClComponentElementwiseBinary::validate(const ArgumentPack<ITensorInfo> &tensors, const ElementwiseBinaryCommonAttributes &attributes)
{
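    // Retrieve the tensor infos bound to the two source operands and the destination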
    const auto lhs = tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const auto rhs = tensors.get_const_tensor(TensorType::ACL_SRC_1);
    const auto dst = tensors.get_const_tensor(TensorType::ACL_DST_0);

    // Check operator type
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(supported_ops.find(attributes.operation()) == supported_ops.end(), "Provided Elementwise operation not supported.");

    // Check that all tensor info pointers are valid
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);

    // Check data types supported for elementwise operators
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16, DataType::S32, DataType::S16, DataType::U8);

    const bool rhs_in_place = (rhs == dst);
    const bool lhs_in_place = (lhs == dst);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_in_place && lhs_in_place, "Both LHS and RHS cannot be in-place at the same time for any elementwise operation.");

    // Check that the dst shape is correct
    const TensorShape out_shape = TensorShape::broadcast_shape(lhs->tensor_shape(), rhs->tensor_shape());
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0), "Wrong shape for dst.");
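    // When neither operand is written in-place, lhs must already match the dst shape; only rhs may be broadcast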
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((!rhs_in_place && !lhs_in_place) && detail::have_different_dimensions(lhs->tensor_shape(), dst->tensor_shape(), 0),
                                    "Only the rhs operand can be broadcast to match the accumulator's (lhs) shape");
    // Matching data type
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst);

    // Matching data layout
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(lhs, rhs);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(lhs, dst);

    // Batching case not supported yet
    const size_t idx_batch = get_data_layout_dimension_index(lhs->data_layout(), DataLayoutDimension::BATCHES);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((lhs->tensor_shape()[idx_batch] != 1) || (rhs->tensor_shape()[idx_batch] != 1) || (dst->tensor_shape()[idx_batch] != 1), "Batching case not supported yet");

    // All tensor infos are initialized
    ARM_COMPUTE_RETURN_ERROR_ON(lhs->tensor_shape().total_size() == 0);
    ARM_COMPUTE_RETURN_ERROR_ON(rhs->tensor_shape().total_size() == 0);
    ARM_COMPUTE_RETURN_ERROR_ON(dst->tensor_shape().total_size() == 0);

    // Device requirements are met
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(lhs);

    return Status{};
}

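// Forward the component id, properties and tensor arguments to the base class and create the
// template writer that emits the CL source for the element-wise binary operation.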
ClComponentElementwiseBinary::ClComponentElementwiseBinary(
    ComponentId                      id,
    const Properties                &properties,
    const ArgumentPack<ITensorInfo> &tensors,
    const Attributes                &attributes)
    : IGpuKernelComponent{ id, properties, tensors },
      _component_writer{ std::make_unique<ClTemplateElementwiseBinary>(id, tensors, attributes) }
{
}

ClComponentElementwiseBinary::~ClComponentElementwiseBinary()
{
}
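// Expose a non-owning pointer to the template writer held by this component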
const IGpuTemplateComponentWriter *ClComponentElementwiseBinary::template_writer() const
{
    return _component_writer.get();
}
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute