MLBEDSW-3558: Put FC on CPU when OFM != 2D

This commit adds a constraint to FullyConnected
ops in supported_operators.py that puts any
such op on the CPU if the output tensor(s)
are not 2D.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I8c898a780b40fc4a1383c09213f0696ea6699b7d
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index ee407b6..4b2938b 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -20,6 +20,7 @@
 from ethosu.vela import architecture_features
 from ethosu.vela.data_type import DataType
 from ethosu.vela.nn_graph import Subgraph
+from ethosu.vela.operation import Op
 from ethosu.vela.operation import Operation
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import QuantizationParameters
@@ -90,7 +91,8 @@
         else:
             np_type = np.int32
         qp = default_quant_params()
-        qp.zero_point = np.zeros(weights_shape)
+        if op.type is not Op.FullyConnected:
+            qp.zero_point = np.zeros(weights_shape)
         weights = create_const_tensor(
             "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
         )
@@ -98,7 +100,8 @@
     # Optional bias tensor
     if bias_shape is not None:
         qp = default_quant_params()
-        qp.zero_point = np.zeros(bias_shape)
+        if op.type is not Op.FullyConnected:
+            qp.zero_point = np.zeros(bias_shape)
         bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
         op.add_input_tensor(bias)
     return op