MLBEDSW-4081 Output diff for some combinations of avgpool + relu (update)

Fixed a regression when the AveragePool op has explicit rescaling.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: I121a0cbf9ab15c8862739266e088b5db7805446b
diff --git a/ethosu/vela/high_level_command_to_npu_op.py b/ethosu/vela/high_level_command_to_npu_op.py
index f67114f..318960e 100644
--- a/ethosu/vela/high_level_command_to_npu_op.py
+++ b/ethosu/vela/high_level_command_to_npu_op.py
@@ -214,7 +214,11 @@
         (
             ps.primary_op.activation is None
             or forced_ofm_quantization is not None
-            or (ps.primary_op.type.is_avgpool_op() and ps.primary_op.activation.op_type.is_relu_op())
+            or (
+                ps.primary_op.type.is_avgpool_op()
+                and ps.primary_op.activation.op_type.is_relu_op()
+                and not ps.primary_op.rescale
+            )
         )
         and (ps.primary_op.memory_function != Op.ConcatSliceWrite)
         and not fused_quantize
@@ -347,7 +351,7 @@
     act = NpuActivation(act_op)
     act.min = op.activation.min
     act.max = op.activation.max
-    if act_op is NpuActivationOp.NONE_OR_RELU and op.type.is_avgpool_op():
+    if act_op is NpuActivationOp.NONE_OR_RELU and op.type.is_avgpool_op() and not op.rescale:
         quant = op.ofm.quantization
         if quant and quant.zero_point:  # Zero point is not 0
             scale_f32 = 1 if quant.scale_f32 is None else quant.scale_f32