vela: Rename --keep-scale-placement CLI option

 - Renamed to --cache-bias-scale-tensor and changed it from a store_true
   flag to an explicit True/False option (default: True), so that caching
   of the bias & scale tensors in SRAM can be disabled from the CLI

Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: I285fe253f03ba98eff36dbe996ad3a57e2ee3d99
diff --git a/ethosu/vela/vela.py b/ethosu/vela/vela.py
index b93774d..08ab483 100644
--- a/ethosu/vela/vela.py
+++ b/ethosu/vela/vela.py
@@ -245,7 +245,11 @@
         "--show-cpu-operations", action="store_true", help="Show the operations that fall back to the CPU"
     )
     parser.add_argument(
-        "--keep-scale-placement", action="store_true", help="Keep scale tensors memory placement during scheduling"
+        "--cache-bias-scale-tensor",
+        type=ast.literal_eval,
+        default=True,
+        choices=[True, False],
+        help="Controls the caching of the bias & scale tensors in SRAM (default: %(default)s)",
     )
     parser.add_argument(
         "--cascading",
@@ -416,7 +420,7 @@
         use_ifm_streaming=args.ifm_streaming,
         pareto_metric=args.pareto_metric,
         use_nhcwb16_between_cascaded_passes=args.nhcwb16_between_cascaded_passes,
-        keep_scale_placement=args.keep_scale_placement,
+        cache_bias_scale_tensor=args.cache_bias_scale_tensor,
     )
 
     model_reader_options = model_reader.ModelReaderOptions()