MLBEDSW-2813: Handle non-const weights and check shapes

- Added check for non-constant weights in supported operators (see the sketch below)
- Added check that the ifm or ifm2 shape matches the ofm shape for element-wise operators (see the sketch after the supported_operators.py diff)
- Handle None tensors for CPU operators (see the sketch after the pass_packing.py diff)
- Handle missing attributes for the Cast operator (see the sketch after the tflite_reader.py diff)

Signed-off-by: Andreas Nevalainen <andreas.nevalainen@arm.com>
Change-Id: I2f16d3d44d0c6da5237550b39273cdb9cc3c7607
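
The recurring idea in this change: Vela keeps compile-time constant data in a
tensor's values attribute, so values is None marks weights that are only known
at runtime and cannot be pre-processed. A minimal sketch of the guard, using a
hypothetical Tensor stand-in rather than Vela's real class:

    class Tensor:
        # Hypothetical stand-in: constant data lives in `values`,
        # runtime-only tensors leave it as None.
        def __init__(self, shape, values=None):
            self.shape = shape
            self.values = values

    def has_const_weights(weight_tensor):
        # Non-const weights force the op onto the CPU, as in the
        # supported_operators.py hunks below.
        return weight_tensor is not None and weight_tensor.values is not None
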
diff --git a/ethosu/vela/mark_tensors.py b/ethosu/vela/mark_tensors.py
index 03ab83f..208b5b8 100644
--- a/ethosu/vela/mark_tensors.py
+++ b/ethosu/vela/mark_tensors.py
@@ -253,7 +253,6 @@
 
 def mark_tensor_purpose(nng, arch, verbose_tensor_purpose=False):
     def mark_tensor_helper(tens, purpose):
-
         if tens.purpose == TensorPurpose.Unknown or tens.purpose == purpose:
             tens.purpose = purpose
         elif tens.purpose != TensorPurpose.LUT:
@@ -284,6 +283,8 @@
                     )
 
                 for idx, tens in enumerate(op.inputs):
+                    if tens is None:
+                        continue
                     purpose = input_purpose(op, idx) if tens.purpose == TensorPurpose.Unknown else tens.purpose
                     mark_tensor_helper(tens, purpose)
 
diff --git a/ethosu/vela/pass_packing.py b/ethosu/vela/pass_packing.py
index a1b03fe..f49f981 100644
--- a/ethosu/vela/pass_packing.py
+++ b/ethosu/vela/pass_packing.py
@@ -318,6 +318,8 @@
                             print("Warning:", curr_op.type, "operation is unknown or unsupported, placing on CPU")
 
                         for inp in reversed(curr_op.inputs):
+                            if inp is None:
+                                continue
                             can_pack = True
                             if len(inp.ops) == 1:
                                 next_op = inp.ops[0]
@@ -390,6 +392,8 @@
         # Check primary_op first
         if primary_op is not None:
             for inp in primary_op.inputs:
+                if inp is None:
+                    continue
                 if len(inp.ops) == 1 and inp.ops[0].type == "DMA" and inp.purpose == TensorPurpose.FeatureMap:
                     src_op = inp.ops[0]
                     if src_op in input_ops_list:
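
Ops left on the CPU can carry None placeholders for optional inputs (e.g. a
dropped bias tensor), so every pass that walks op.inputs now skips those slots
first. A minimal sketch of the pattern inlined in the two hunks above (the
helper name is illustrative):

    def present_inputs(op):
        # Yield only the inputs that actually exist; None entries are
        # placeholders for optional tensors of CPU-placed ops.
        for tens in op.inputs:
            if tens is None:
                continue
            yield tens
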
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index b6551cf..b0afa2c 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -227,6 +227,12 @@
         # check batch size
         if ifm_tensor.shape[0] != 1:
             return False
+
+        # check for non-constant weights
+        if weight_tensor.values is None:
+            print("Warning:", op.type, "has non-const weights, placing on CPU")
+            return False
+
         return True
 
     def check_depthwise_convolution_restrictions(self, op):
@@ -317,6 +323,11 @@
         if not self.check_bias_restrictions(bias_tensor):
             return False
 
+        # check for non-constant weights
+        if weight_tensor.values is None:
+            print("Warning:", op.type, "has non-const weights, placing on CPU")
+            return False
+
         return True
 
     def check_element_wise_restrictions(self, op):
@@ -362,6 +373,10 @@
         if op.type == "LeakyRelu" and op.attrs["alpha"] < 0:
             return False
 
+        # check that ifm or ifm2 matches the ofm shape
+        if ifm_tensor.shape != ofm_tensor.shape and ifm2_tensor.shape != ofm_tensor.shape:
+            return False
+
         return True
 
     def check_memory_only_restrictions(self, op):
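
The new element-wise restriction accepts an op only if at least one input
already has the output shape; the other input may then be broadcast against
it. A sketch of the rule with plain list shapes (the function name is
illustrative):

    def elementwise_shapes_ok(ifm_shape, ifm2_shape, ofm_shape):
        # At least one input must match the output shape exactly;
        # broadcasting both inputs at once is not supported.
        return ifm_shape == ofm_shape or ifm2_shape == ofm_shape

For example, ifm [1, 8, 8, 16] with ifm2 [1, 1, 1, 16] and ofm [1, 8, 8, 16]
passes, while ifm [1, 8, 8, 1] with the same ifm2 and ofm is rejected.
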
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index a2f744d..7458b90 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -152,7 +152,8 @@
         activation_function_to_split_out = None
 
         if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
-            inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))
+            if inputs[1].values is not None:
+                inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))
             if len(inputs) < 3 or (len(inputs) < 4 and "Backprop" in op_type):
                 # No Bias tensor
                 inputs.append(None)
@@ -160,7 +161,8 @@
                 inputs[-1] = clone_and_reshape_tensor(inputs[-1], (0,))
 
         if op_type.startswith("FullyConnected"):
-            inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0))
+            if inputs[1].values is not None:
+                inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0))
             if len(inputs) < 3:
                 # No Bias tensor
                 inputs.append(None)
@@ -174,6 +176,13 @@
                 # Reshape should have an attrib "new_shape" but if it is missing, add it based on the output shape
                 op.attrs["new_shape"] = outputs[0].shape
 
+            if op_type == "Cast":
+                # Cast ops should have "in/out_data_type" attribs; add them from the tensor dtypes if missing
+                if "in_data_type" not in op.attrs:
+                    op.attrs["in_data_type"] = inputs[0].dtype
+                if "out_data_type" not in op.attrs:
+                    op.attrs["out_data_type"] = outputs[0].dtype
+
             if "stride_w" in op.attrs:
                 op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
             if "filter_width" in op.attrs:
diff --git a/ethosu/vela/tflite_writer.py b/ethosu/vela/tflite_writer.py
index cb208d7..68af487 100644
--- a/ethosu/vela/tflite_writer.py
+++ b/ethosu/vela/tflite_writer.py
@@ -90,9 +90,13 @@
                     if op.type not in self.ops_to_ignore:
                         all_ops.append(op)
                     if op.type.startswith("Conv2D") or op.type.startswith("DepthwiseConv2d"):
-                        self.tensors_to_reshape[op.inputs[1]] = (3, 0, 1, 2)
+                        # If values is None, the op has non-constant weights
+                        if op.inputs[1].values is not None:
+                            self.tensors_to_reshape[op.inputs[1]] = (3, 0, 1, 2)
                     if op.type.startswith("FullyConnected"):
-                        self.tensors_to_reshape[op.inputs[1]] = (1, 0)
+                        # If values is None, the op has non-constant weights
+                        if op.inputs[1].values is not None:
+                            self.tensors_to_reshape[op.inputs[1]] = (1, 0)
 
         self.operator_codes = list(sorted(set(op.type for op in all_ops)))
         self.operator_code_map = {}
@@ -314,7 +318,8 @@
         # e.g. due to an empty graph containing no ops
         for op in all_ops + placeholder_ops:
             for tens in op.inputs + op.outputs:
-                tensor_set.add(tens)
+                if tens is not None:
+                    tensor_set.add(tens)
 
         all_tensors = [tens for nm, idx, tens in sorted((tens.name, idx, tens) for idx, tens in enumerate(tensor_set))]