TOSA: Support for standalone CLAMP/RELU

Added support for standalone CLAMP/RELU operators, i.e. activations that
are not fused with a preceding operator. Limited to:
- Rank <= 4
- N = 1 if Rank = 4
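
For reference, a minimal sketch (not the actual Vela code, names are
illustrative only) of the arithmetic applied below in the graph optimiser:
the integer clamp bounds of a quantized CLAMP/RELU are expressed relative
to the IFM zero point before the activation is lowered.

    # Illustrative only; assumes bounds and zero point are plain ints.
    def adjust_clamp_bounds(min_int, max_int, ifm_zero_point):
        """Shift quantized clamp bounds by the IFM zero point."""
        zp = ifm_zero_point if ifm_zero_point else 0
        return min_int - zp, max_int - zp

    # Example: int8 CLAMP with quantized bounds [-10, 100] and zero point 5
    # yields effective bounds (-15, 95).
    print(adjust_clamp_bounds(-10, 100, 5))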

Signed-off-by: Patrik Gustavsson <patrik.gustavsson@arm.com>
Change-Id: If1a32fb330ce6c67c09ec4b554b4a0688444d5f0
diff --git a/ethosu/vela/tosa_graph_optimiser.py b/ethosu/vela/tosa_graph_optimiser.py
index 49fc997..bade4a9 100644
--- a/ethosu/vela/tosa_graph_optimiser.py
+++ b/ethosu/vela/tosa_graph_optimiser.py
@@ -29,7 +29,6 @@
 from .graph_optimiser_util import set_ifm_ofm_op_shapes
 from .graph_optimiser_util import set_tensor_equivalence
 from .operation import ExplicitScaling
-from .operation import NpuBlockType
 from .operation import Op
 from .operation_util import create_add_nop
 from .operation_util import create_avgpool_nop
@@ -303,20 +302,6 @@
         return op
 
     ifm = op.ifm
-    prev_op = ifm.ops[0]
-
-    # Note: the below checks on prev_op require that a first optimize pass on the full graph has been performed
-    fuseable = (
-        prev_op.run_on_npu
-        and prev_op.type.npu_block_type != NpuBlockType.Default
-        and len(ifm.ops) == 1
-        and len(prev_op.outputs[0].consumers()) == 1
-        and prev_op.activation is None
-    )
-    if not fuseable:
-        print("Warning: relu like op will not be possible to fuse, currently not supported")
-        assert False
-
     zp = ifm.quantization.zero_point if ifm.quantization.zero_point else 0
     if op.ofm.quantization.zero_point is None:
         op.ofm.quantization.zero_point = zp
@@ -326,9 +311,6 @@
         op.attrs["max"] = op.attrs["max_int"] - zp
     elif op.type == Op.ReluN:
         op.attrs["max"] = op.attrs["max_int"] - zp
-    else:
-        print("Warning: Unknown TOSA activation Op")
-        assert False
 
     return op