Update version of Black to 22.3.0

Update the version of Black to 22.3.0 due to updated dependencies.
Reformat the code to fix the issues reported by the new version.
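
The reformatting below is mechanical: Black 22.3.0 honours the
"magic trailing comma", so a pre-existing trailing comma inside a
call forces each argument onto its own line. An illustrative
before/after, taken from one of the touched call sites:

    # Before: a one-line call keeps its trailing comma
    op1.ifm = create_feature_map(NpuShape3D(height=1, width=1, depth=1), 1, 0x60, layout=NpuLayout.NHCWB16,)

    # After: Black 22.3.0 treats the trailing comma as "magic"
    # and explodes the call to one argument per line
    op1.ifm = create_feature_map(
        NpuShape3D(height=1, width=1, depth=1),
        1,
        0x60,
        layout=NpuLayout.NHCWB16,
    )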

Signed-off-by: Jonas Ohlsson <jonas.ohlsson@arm.com>
Change-Id: I60056aae452093ce8dcea1f499ecced22b25eef1
diff --git a/ethosu/vela/test/extapi/test_extapi_encode_weights.py b/ethosu/vela/test/extapi/test_extapi_encode_weights.py
index 6367cb3..87c504f 100644
--- a/ethosu/vela/test/extapi/test_extapi_encode_weights.py
+++ b/ethosu/vela/test/extapi/test_extapi_encode_weights.py
@@ -24,7 +24,8 @@
 
 
 @pytest.mark.parametrize(
-    "arch", list(NpuAccelerator),
+    "arch",
+    list(NpuAccelerator),
 )
 @pytest.mark.parametrize("dilation_x", [1, 2])
 @pytest.mark.parametrize("dilation_y", [1, 2])
@@ -32,7 +33,12 @@
 @pytest.mark.parametrize("depth_control", [1, 2, 3])
 @pytest.mark.parametrize("weights_shape_and_block_depth", [((16, 16, 16, 16), 8), ((3, 3, 25, 16), 8)])
 def test_encode_weights(
-    arch, weights_shape_and_block_depth, dilation_x, dilation_y, ifm_bitdepth, depth_control,
+    arch,
+    weights_shape_and_block_depth,
+    dilation_x,
+    dilation_y,
+    ifm_bitdepth,
+    depth_control,
 ):
     """
     This unit test checks the interface of the API function but not the functionality.
diff --git a/ethosu/vela/test/test_register_command_stream_util.py b/ethosu/vela/test/test_register_command_stream_util.py
index 985523f..86a48ff 100644
--- a/ethosu/vela/test/test_register_command_stream_util.py
+++ b/ethosu/vela/test/test_register_command_stream_util.py
@@ -131,14 +131,34 @@
     op2 takes 1 block to complete, which results in blockdep 0
     """
     op1 = NpuElementWiseOperation(NpuElementWiseOp.CLZ)
-    op1.ifm = create_feature_map(NpuShape3D(height=1, width=1, depth=1), 1, 0x60, layout=NpuLayout.NHCWB16,)
-    intermediate_fm = create_feature_map(NpuShape3D(height=1, width=1, depth=1), 1, 0xA0, layout=NpuLayout.NHCWB16,)
+    op1.ifm = create_feature_map(
+        NpuShape3D(height=1, width=1, depth=1),
+        1,
+        0x60,
+        layout=NpuLayout.NHCWB16,
+    )
+    intermediate_fm = create_feature_map(
+        NpuShape3D(height=1, width=1, depth=1),
+        1,
+        0xA0,
+        layout=NpuLayout.NHCWB16,
+    )
     op1.ofm = intermediate_fm
     op1.block_config = NpuShape3D(height=1, width=1, depth=4)
     op2 = NpuElementWiseOperation(NpuElementWiseOp.SUB)
-    op2.ifm = create_feature_map(NpuShape3D(height=1, width=1, depth=1), 1, 0x39AC0, layout=NpuLayout.NHCWB16,)
+    op2.ifm = create_feature_map(
+        NpuShape3D(height=1, width=1, depth=1),
+        1,
+        0x39AC0,
+        layout=NpuLayout.NHCWB16,
+    )
     op2.ifm2 = intermediate_fm
-    op2.ofm = create_feature_map(NpuShape3D(height=1, width=1, depth=1), 1, 0xE0, layout=NpuLayout.NHCWB16,)
+    op2.ofm = create_feature_map(
+        NpuShape3D(height=1, width=1, depth=1),
+        1,
+        0xE0,
+        layout=NpuLayout.NHCWB16,
+    )
     op2.block_config = NpuShape3D(height=1, width=1, depth=4)
     arch = create_default_arch(Accelerator.Ethos_U55_128)
     block_dep = calc_blockdep(arch, op1, op2)
@@ -153,8 +173,18 @@
     which results in blockdep 2
     """
     op1 = NpuConv2DOperation()
-    op1.ifm = create_feature_map(NpuShape3D(height=4, width=48, depth=8), 1, 0x4C80, layout=NpuLayout.NHCWB16,)
-    op1.ofm = create_feature_map(NpuShape3D(height=4, width=48, depth=16), 1, 0x6480, layout=NpuLayout.NHCWB16,)
+    op1.ifm = create_feature_map(
+        NpuShape3D(height=4, width=48, depth=8),
+        1,
+        0x4C80,
+        layout=NpuLayout.NHCWB16,
+    )
+    op1.ofm = create_feature_map(
+        NpuShape3D(height=4, width=48, depth=16),
+        1,
+        0x6480,
+        layout=NpuLayout.NHCWB16,
+    )
     op1.kernel = NpuKernel(1, 1)
     op1.weights = [NpuAddressRange(region=1, address=0x4AE0, length=208)]
     op1.biases = [NpuAddressRange(region=1, address=0x49A0, length=160)]
@@ -162,10 +192,20 @@
     op1.block_traversal = NpuBlockTraversal.PART_KERNEL_FIRST
     op1.block_config = NpuShape3D(height=4, width=6, depth=16)
     op2 = NpuConvDepthWiseOperation()
-    op2.ifm = create_feature_map(NpuShape3D(height=3, width=48, depth=16), 1, 0, layout=NpuLayout.NHCWB16,)
+    op2.ifm = create_feature_map(
+        NpuShape3D(height=3, width=48, depth=16),
+        1,
+        0,
+        layout=NpuLayout.NHCWB16,
+    )
     # op2 has two tiles, the lower tile is produced by op1
     op2.ifm.tiles = NpuTileBox(height_0=2, height_1=2, width_0=48, addresses=[0x7680, 0, 0x6480, 0])
-    op2.ofm = create_feature_map(NpuShape3D(height=1, width=24, depth=16), 1, 0x6480, layout=NpuLayout.NHCWB16,)
+    op2.ofm = create_feature_map(
+        NpuShape3D(height=1, width=24, depth=16),
+        1,
+        0x6480,
+        layout=NpuLayout.NHCWB16,
+    )
     op2.kernel = NpuKernel(3, 3, stride_x=2, stride_y=2)
     op2.weights = [NpuAddressRange(region=1, address=0x4BB0, length=208)]
     op2.biases = [NpuAddressRange(region=1, address=0x4A40, length=160)]
@@ -183,8 +223,18 @@
     which results in blockdep 3
     """
     op1 = NpuConv2DOperation()
-    op1.ifm = create_feature_map(NpuShape3D(height=13, width=96, depth=1), 1, 0, layout=NpuLayout.NHWC,)
-    op1.ofm = create_feature_map(NpuShape3D(height=6, width=48, depth=8), 1, 0x7C80, layout=NpuLayout.NHCWB16,)
+    op1.ifm = create_feature_map(
+        NpuShape3D(height=13, width=96, depth=1),
+        1,
+        0,
+        layout=NpuLayout.NHWC,
+    )
+    op1.ofm = create_feature_map(
+        NpuShape3D(height=6, width=48, depth=8),
+        1,
+        0x7C80,
+        layout=NpuLayout.NHCWB16,
+    )
     op1.kernel = NpuKernel(3, 3, stride_x=2, stride_y=2)
     op1.weights = [NpuAddressRange(region=1, address=0x4AE0, length=144)]
     op1.biases = [NpuAddressRange(region=1, address=0x49A0, length=80)]
@@ -192,8 +242,18 @@
     op1.block_traversal = NpuBlockTraversal.PART_KERNEL_FIRST
     op1.block_config = NpuShape3D(height=6, width=3, depth=8)
     op2 = NpuConvDepthWiseOperation()
-    op2.ifm = create_feature_map(NpuShape3D(height=5, width=48, depth=8), 1, 0x7C80, layout=NpuLayout.NHCWB16,)
-    op2.ofm = create_feature_map(NpuShape3D(height=4, width=48, depth=8), 1, 0x4C80, layout=NpuLayout.NHCWB16,)
+    op2.ifm = create_feature_map(
+        NpuShape3D(height=5, width=48, depth=8),
+        1,
+        0x7C80,
+        layout=NpuLayout.NHCWB16,
+    )
+    op2.ofm = create_feature_map(
+        NpuShape3D(height=4, width=48, depth=8),
+        1,
+        0x4C80,
+        layout=NpuLayout.NHCWB16,
+    )
     op2.kernel = NpuKernel(3, 3)
     op2.weights = [NpuAddressRange(region=1, address=0x4BB0, length=112)]
     op2.biases = [NpuAddressRange(region=1, address=0x4A40, length=80)]
diff --git a/ethosu/vela/test/test_tflite_model_semantic.py b/ethosu/vela/test/test_tflite_model_semantic.py
index 84f9916..1e5dbd4 100644
--- a/ethosu/vela/test/test_tflite_model_semantic.py
+++ b/ethosu/vela/test/test_tflite_model_semantic.py
@@ -128,7 +128,14 @@
 def test_constraint_ofm_scale_too_small():
     # Tests handling of OFM scale < 1e-38
     shp = [1, 10, 20, 16]
-    op = testutil.create_elemwise_op(Op.Mul, "mul", shp, shp, shp, ofm_quant=testutil.default_quant_params(),)
+    op = testutil.create_elemwise_op(
+        Op.Mul,
+        "mul",
+        shp,
+        shp,
+        shp,
+        ofm_quant=testutil.default_quant_params(),
+    )
     assert semantic_checker.is_operator_semantic_valid(op)
     op.ofm.quantization.scale_f32 = 1e-43
     assert not semantic_checker.is_operator_semantic_valid(op)
@@ -245,7 +252,12 @@
 
 
 def create_pad_op(
-    in_shape, out_shape, padding, in_dtype=DataType.int8, out_dtype=DataType.int8, pad_dtype=DataType.int32,
+    in_shape,
+    out_shape,
+    padding,
+    in_dtype=DataType.int8,
+    out_dtype=DataType.int8,
+    pad_dtype=DataType.int32,
 ):
     qp = testutil.default_quant_params()
     in0 = Tensor(in_shape, in_dtype, "in")
@@ -259,7 +271,11 @@
 
 def test_constraint_pad_input_count():
     # Incorrect number of input tensors (2)
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
+    )
     assert semantic_checker.is_operator_semantic_valid(op)
     op.add_input_tensor(op.inputs[0].clone())
     assert not semantic_checker.is_operator_semantic_valid(op)
diff --git a/ethosu/vela/test/test_tflite_supported_operators.py b/ethosu/vela/test/test_tflite_supported_operators.py
index e3db791..04d3cba 100644
--- a/ethosu/vela/test/test_tflite_supported_operators.py
+++ b/ethosu/vela/test/test_tflite_supported_operators.py
@@ -345,7 +345,12 @@
 
 
 def create_pad_op(
-    in_shape, out_shape, padding, in_dtype=DataType.int8, out_dtype=DataType.int8, pad_dtype=DataType.int32,
+    in_shape,
+    out_shape,
+    padding,
+    in_dtype=DataType.int8,
+    out_dtype=DataType.int8,
+    pad_dtype=DataType.int32,
 ):
     qp = testutil.default_quant_params()
     in0 = Tensor(in_shape, in_dtype, "in")
@@ -359,11 +364,23 @@
 
 def test_constraint_padded_dimensions():
     # Incorrect padding dimensions, can only pad width and height
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [1, 1], [0, 0]],)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
+    )
     assert not support.is_operator_supported(op)
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]],)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[[1, 1], [1, 1], [0, 0]],
+    )
     assert support.is_operator_supported(op)
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 1]],)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[[1, 1], [1, 1], [0, 1]],
+    )
     assert not support.is_operator_supported(op)
 
 
@@ -371,12 +388,20 @@
     # PAD operator must be of shape (3,2) or (4,2)
     op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
     assert support.is_operator_supported(op)
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
+    )
     assert not support.is_operator_supported(op)
 
 
 def test_constraint_pad_none():
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[],)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[],
+    )
     assert not support.is_operator_supported(op)