MLBEDSW-6703 Add SHAPE operator to supported operators

Added SHAPE operator to the supported operators report.
Updated the constraints for the QUANTIZE and SHAPE operators.
Also fixed RESHAPE consuming its statically optimised shape input.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: I1d964d602d3f361a0f16dae8133197280dd84c48
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index 5ed1862..db1c6f1 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -834,7 +834,16 @@
         self.ifm_shapes = []
         self.ofm_shapes = []
 
-        ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = self.get_ifm_ifm2_weights_ofm()
+        ifm_tensor, ifm2_tensor, ofm_tensor = self.get_ifm_ifm2_ofm()
+
+        if self.type == Op.Reshape:
+            # Set ofm shape
+            if len(self.inputs) > 1 and self.inputs[1].values is not None:
+                ofm_tensor.shape = self.inputs[1].values.flatten().tolist()
+                ofm_elements = ofm_tensor.elements()
+                # Stretch dimension
+                if ofm_elements < 0:
+                    ofm_tensor.shape[ofm_tensor.shape.index(-1)] = int(ifm_tensor.elements() / abs(ofm_elements))
 
         # set all shapes to op, as 4D
         if self.type == Op.FullyConnected:
@@ -847,7 +856,7 @@
                 self.ofm_shapes.append(Shape4D([self.ofm.shape[0], 1, 1, self.ofm.shape[1]]))
             else:
                 self.ofm_shapes.append(Shape4D(ofm_tensor.get_full_shape()))
-        if self.type == Op.Softmax:
+        elif self.type == Op.Softmax:
             self.ifm_shapes.append(Shape4D(ifm_tensor.get_full_shape()))
             self.ofm_shapes.append(Shape4D(ofm_tensor.get_full_shape()))
         elif self.type.is_split_op() or self.type.is_concat_op():