IVGCVSW-7846 Refactor ElementBinaryOps to use ElementBinaryLayer

* Refactor the Add, Div, Maximum, Minimum, Mul and Sub conversion
  functions to use ElementwiseBinary layers instead
* Add POW Operation support
* Add REDUCE_PROD Operation support

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I8c91980e585f7ff2561610084e7c6b48fb278171
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index 90fd711..790fad6 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -25,7 +25,7 @@
         case OperationType::ABS:
             return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
         case OperationType::ADD:
-            return ConvertAdd(operation, model, data);
+            return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Add);
         case OperationType::ARGMAX:
             return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
         case OperationType::ARGMIN:
@@ -49,7 +49,7 @@
         case OperationType::DEQUANTIZE:
             return ConvertDequantize(operation, model, data);
         case OperationType::DIV:
-            return ConvertDiv(operation, model, data);
+            return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Div);
         case OperationType::ELU:
             return ConvertElu(operation, model, data);
         case OperationType::EQUAL:
@@ -103,13 +103,13 @@
         case OperationType::MAX_POOL_2D:
             return ConvertMaxPool2d(operation, model, data);
         case OperationType::MAXIMUM:
-            return ConvertMaximum(operation, model, data);
+            return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Maximum);
         case OperationType::MEAN:
             return ConvertMean(operation, model, data);
         case OperationType::MINIMUM:
-            return ConvertMinimum(operation, model, data);
+            return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Minimum);
         case OperationType::MUL:
-            return ConvertMul(operation, model, data);
+            return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Mul);
         case OperationType::NEG:
             return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
         case OperationType::NOT_EQUAL:
@@ -120,6 +120,8 @@
             return ConvertPadV2(operation, model, data);
         case OperationType::PRELU:
             return ConvertPrelu(operation, model, data);
+        case OperationType::POW:
+            return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Power);
         case OperationType::QUANTIZE:
             return ConvertQuantize(operation, model, data);
         case OperationType::QUANTIZED_LSTM:
@@ -132,6 +134,8 @@
             return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
         case OperationType::REDUCE_MIN:
             return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
+        case OperationType::REDUCE_PROD:
+            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Prod);
         case OperationType::REDUCE_SUM:
             return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
         case OperationType::RELU:
@@ -163,7 +167,7 @@
         case OperationType::STRIDED_SLICE:
             return ConvertStridedSlice(operation, model, data);
         case OperationType::SUB:
-            return ConvertSub(operation, model, data);
+            return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Sub);
         case OperationType::TRANSPOSE:
             return ConvertTranspose(operation, model, data);
         case OperationType::TRANSPOSE_CONV_2D:
@@ -176,80 +180,6 @@
     }
 }
 
-bool Converter::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
-{
-    VLOG(DRIVER) << "Converter::ConvertAdd()";
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2, and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
-    if (!outputOperand)
-    {
-        return false;
-    }
-
-    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsAdditionSupported,
-                                   data.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   inputInfo0,
-                                   inputInfo1,
-                                   outputInfo);
-        ARMNN_NO_DEPRECATE_WARN_END
-    };
-
-    if(!IsDynamicTensor(outputInfo))
-    {
-        validateFunc(outputInfo, isSupported);
-    }
-    else
-    {
-        isSupported = AreDynamicTensorsSupported();
-    }
-
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
-    startLayer->SetBackendId(setBackend);
-
-    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
-    if (!isReshapeSupported)
-    {
-        return false;
-    }
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
-                                        data, nullptr, validateFunc, activationFunction);
-}
-
 bool Converter::ConvertArgMinMax(const Operation& operation,
                                  const Model& model,
                                  ConversionData& data,
@@ -786,7 +716,11 @@
         }
     }
 
-    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
+    if (inputShapes.size() != inputHandles.size())
+    {
+        return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
+                    inputShapes.size(), inputHandles.size());
+    }
 
     if (inputsHaveBeenReshaped)
     {
@@ -1508,9 +1442,13 @@
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
-bool Converter::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+bool Converter::ConvertElementwiseBinary(const Operation& operation,
+                                         const Model& model,
+                                         ConversionData& data,
+                                         armnn::BinaryOperation binaryOperation)
 {
-    VLOG(DRIVER) << "Converter::ConvertDiv()";
+    VLOG(DRIVER) << "Converter::ConvertElementwiseBinary()";
+    VLOG(DRIVER) << "binaryOperation = " << GetBinaryOperationAsCString(binaryOperation);
 
     LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
     LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
@@ -1520,39 +1458,38 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
+    // The FuseActivation parameter is always the input index 2, and it should be optional
     ActivationFn activationFunction;
     if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
     {
-        return Fail("%s: Operation has invalid inputs", __func__);
+        return Fail("%s: Operation has invalid optional input: activation function", __func__);
     }
 
     const Operand* output = GetOutputOperand(operation, 0, model);
     if (!output)
     {
-        return Fail("%s: Could not read output 0", __func__);
+        return Fail("%s: Could not read output", __func__);
     }
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
+    armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
+
     bool isSupported = false;
-    armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsDivisionSupported,
+                                   IsElementwiseBinarySupported,
                                    data.m_Backends,
                                    isSupported,
-                                   setBackend,
+                                   armnn::BackendId(),
                                    input0.GetTensorInfo(),
                                    input1.GetTensorInfo(),
-                                   outputInfo);
-        ARMNN_NO_DEPRECATE_WARN_END
+                                   outputInfo,
+                                   binaryOperation);
     };
 
-    if(!IsDynamicTensor(outputInfo))
+    if (!IsDynamicTensor(outputInfo))
     {
         validateFunc(outputInfo, isSupported);
     }
@@ -1566,18 +1503,18 @@
         return false;
     }
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
-    startLayer->SetBackendId(setBackend);
-
-    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+    armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
+    if (!layer)
+    {
+        return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
+    }
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
     if (!isReshapeSupported)
     {
         return false;
     }
 
-    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model,
                                         data, nullptr, validateFunc, activationFunction);
 }
 
@@ -2082,7 +2019,8 @@
     {
         return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
     }
-    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    int32_t inputDimensions_int = static_cast<int32_t>(inputDimensions);
+    if ((axis < -inputDimensions_int) || (inputDimensions_int <= axis))
     {
         return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d))", __func__, axis,
                     inputDimensions, inputDimensions);
@@ -3362,70 +3300,6 @@
     return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
 }
 
-bool Converter::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
-{
-    VLOG(DRIVER) << "Converter::ConvertMaximum()";
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
-    if (!outputOperand)
-    {
-        return Fail("%s: Could not read output", __func__);
-    }
-
-    const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
-
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsMaximumSupported,
-                                   data.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   input0.GetTensorInfo(),
-                                   input1.GetTensorInfo(),
-                                   outInfo);
-        ARMNN_NO_DEPRECATE_WARN_END
-    };
-
-    if(IsDynamicTensor(outInfo))
-    {
-        isSupported = AreDynamicTensorsSupported();
-    }
-    else
-    {
-        validateFunc(outInfo, isSupported);
-    }
-
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
-    layer->SetBackendId(setBackend);
-    assert(layer != nullptr);
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
-    if (!isReshapeSupported)
-    {
-        return false;
-    }
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
-}
-
 bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
 {
     VLOG(DRIVER) << "Converter::ConvertMean()";
@@ -3512,144 +3386,6 @@
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
-bool Converter::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
-{
-    VLOG(DRIVER) << "Converter::ConvertMinimum()";
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsMinimumSupported,
-                                   data.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   input0.GetTensorInfo(),
-                                   input1.GetTensorInfo(),
-                                   outputInfo);
-        ARMNN_NO_DEPRECATE_WARN_END
-    };
-
-    if(IsDynamicTensor(outputInfo))
-    {
-        isSupported = AreDynamicTensorsSupported();
-    }
-    else
-    {
-        validateFunc(outputInfo, isSupported);
-    }
-
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
-    layer->SetBackendId(setBackend);
-    assert(layer != nullptr);
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
-    if (!isReshapeSupported)
-    {
-        return false;
-    }
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
-}
-
-bool Converter::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
-{
-    VLOG(DRIVER) << "Converter::ConvertMul()";
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
-
-    if (outputOperand == nullptr)
-    {
-        return false;
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsMultiplicationSupported,
-                                   data.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   input0.GetTensorInfo(),
-                                   input1.GetTensorInfo(),
-                                   outputInfo);
-        ARMNN_NO_DEPRECATE_WARN_END
-    };
-
-    if(!IsDynamicTensor(outputInfo))
-    {
-        validateFunc(outputInfo, isSupported);
-    }
-    else
-    {
-        isSupported = AreDynamicTensorsSupported();
-    }
-
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
-    startLayer->SetBackendId(setBackend);
-
-    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
-    if (!isReshapeSupported)
-    {
-        return false;
-    }
-
-    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
-                                        data, nullptr, validateFunc, activationFunction);
-}
-
 bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
 {
     VLOG(DRIVER) << "Converter::ConvertPad()";
@@ -5328,78 +5064,6 @@
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
 
-bool Converter::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
-{
-    VLOG(DRIVER) << "Converter::ConvertSub()";
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                                   IsSubtractionSupported,
-                                   data.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   input0.GetTensorInfo(),
-                                   input1.GetTensorInfo(),
-                                   outputInfo);
-        ARMNN_NO_DEPRECATE_WARN_END
-    };
-
-    if(IsDynamicTensor(outputInfo))
-    {
-        isSupported = AreDynamicTensorsSupported();
-    }
-    else
-    {
-        validateFunc(outputInfo, isSupported);
-    }
-
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
-    startLayer->SetBackendId(setBackend);
-
-    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
-    if (!isReshapeSupported)
-    {
-        return false;
-    }
-    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
-                                        data, nullptr, validateFunc, activationFunction);
-}
-
 bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
 {
     VLOG(DRIVER) << "Converter::ConvertTanH()";
diff --git a/shim/sl/canonical/Converter.hpp b/shim/sl/canonical/Converter.hpp
index 7e4a89e..bf660b9 100644
--- a/shim/sl/canonical/Converter.hpp
+++ b/shim/sl/canonical/Converter.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,8 +31,6 @@
     static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
 
 private:
-    static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
-
     static bool ConvertArgMinMax(const Operation& operation,
                                  const Model& model,
                                  ConversionData& data,
@@ -61,13 +59,16 @@
 
     static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
-
     static bool ConvertElementwiseUnary(const Operation& operation,
                                         const Model& model,
                                         ConversionData& data,
                                         armnn::UnaryOperation unaryOperation);
 
+    static bool ConvertElementwiseBinary(const Operation& operation,
+                                         const Model& model,
+                                         ConversionData& data,
+                                         armnn::BinaryOperation binaryOperation);
+
     static bool ConvertElu(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
@@ -107,14 +108,8 @@
 
     static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
-
     static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
-
-    static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
-
     static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
@@ -154,8 +149,6 @@
 
     static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
-
     static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);