MLBEDSW-2589: Skip weight compression for CPU ops
This commit fixes a bug where CPU ops were getting
passed on as NPU ops in weight_compressor.py due to
Tensor.find_npu_op() incorrectly returning any
op with an 'npu_block_type' attribute (which every
op has) as an NPU op.
Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I7a758f8d1b1237907816bc1be7b77aff765ae688
diff --git a/ethosu/vela/mark_tensors.py b/ethosu/vela/mark_tensors.py
index b6b2f9f..40ce467 100644
--- a/ethosu/vela/mark_tensors.py
+++ b/ethosu/vela/mark_tensors.py
@@ -371,10 +371,11 @@
src_tens = tens.get_dma_src_tensor()
if src_tens is not None:
op = tens.find_npu_op()
- npu_block_type = op.attrs["npu_block_type"]
- weight_compressor.compress_weights(arch, nng, tens, npu_block_type, 16, 16, op.get_dilation_h_w())
- # Alias compressed weights back into source tensor
- src_tens.copy_compressed_weight_info(tens)
+ if op is not None:
+ npu_block_type = op.attrs["npu_block_type"]
+ weight_compressor.compress_weights(arch, nng, tens, npu_block_type, 16, 16, op.get_dilation_h_w())
+ # Alias compressed weights back into source tensor
+ src_tens.copy_compressed_weight_info(tens)
if verbose_tensor_format:
nng.print_passes_with_tensors()
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index 312e8f3..c41a7eb 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -626,7 +626,7 @@
for op in self.consumers():
if op.type == "DMA":
return op.outputs[0].find_npu_op()
- if "npu_block_type" in op.attrs:
+ if op.run_on_npu:
return op
return None
diff --git a/ethosu/vela/weight_compressor.py b/ethosu/vela/weight_compressor.py
index 45427a1..a275e41 100644
--- a/ethosu/vela/weight_compressor.py
+++ b/ethosu/vela/weight_compressor.py
@@ -507,6 +507,8 @@
tens = ps.weight_tensor
if tens is not None:
op = tens.find_npu_op()
+ if op is None:
+ continue
npu_usage_of_tensor = op.attrs["npu_block_type"]
needs_dma = tens.needs_dma()
if ps.cascade.strategy == SchedulingStrategy.WeightStream and needs_dma: