[ref model] Change PadOp's padding to Shape

Changed to use pad input of PadOp for testing.
The pad input is now a tensor of tosa.shape type.

Moved padding error checking from checkTensorAttributes to eval.
Modified pad's PadOutputShapeMismatch test generation to avoid
generating output shapes with dimensions <= 0.

Signed-off-by: Tai Ly <tai.ly@arm.com>
Change-Id: I437c86d9a012903458a648667f6693db67b97d76
diff --git a/.gitignore b/.gitignore
index e861e24..941cf20 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,7 @@
 
 __pycache__/
 build/
-build-debug/
+debug-build/
 .cache
 compile_commands.json
 dist/
diff --git a/reference_model/src/ops/data_layout.cc b/reference_model/src/ops/data_layout.cc
index a4b4e0a..14f2918 100644
--- a/reference_model/src/ops/data_layout.cc
+++ b/reference_model/src/ops/data_layout.cc
@@ -126,7 +126,7 @@
 OpPad<Rank, Dtype>::OpPad(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, uint64_t id_)
     : GraphNode(sgt_, Op_PAD, id_)
 {
-    setRequiredOperands(1, 1);
+    setRequiredOperands(2, 1);
     setRequiredRank(1);
 
     INIT_ATTRIBUTE(Pad);
@@ -158,25 +158,11 @@
         return 1;
     }
 
-    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
-    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+    in      = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    padding = dynamic_cast<TosaReference::TensorTemplate<TPadding>*>(inputs[1]);
+    out     = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
     ASSERT_MEM(in && out);
 
-    // padding in spec is 2D array in shape of [Rank, 2]
-    // Reference model implement this as 1D array of [Rank * 2], with ordering:
-    // [Rank0_front, Rank0_back, Rank1_front, Rank1_back, ..., Rank(N-1)_front, Rank(N-1)_back]
-    ERROR_IF(attribute->padding().size() != (Rank * 2), "OpPad: padding length needs to be (rank(input1) * 2)");
-
-    for (int i = 0; i < Rank; i++)
-    {
-        int32_t pad_front = attribute->padding()[2 * i];
-        int32_t pad_back  = attribute->padding()[2 * i + 1];
-        ERROR_IF((pad_front < 0) || (pad_back < 0), "OpPad: padding can't be smaller than 0");
-        ERROR_IF(out->getShape()[i] != pad_front + in->getShape()[i] + pad_back,
-                 "OpPad: output shape not equal to input plus padding");
-        paddings_array[i] = std::make_pair(pad_front, pad_back);
-    }
-
     return 0;
 }
 
@@ -204,6 +190,20 @@
             break;
     }
 
+    // padding is an 1D array of [Rank * 2], with ordering:
+    // [Rank0_front, Rank0_back, Rank1_front, Rank1_back, ..., Rank(N-1)_front, Rank(N-1)_back]
+    TPadding padding_val = this->padding->getTensor();
+    ERROR_IF(padding_val.size() != (Rank * 2), "OpPad: padding length needs to be (rank(input1) * 2)");
+    for (int i = 0; i < Rank; i++)
+    {
+        auto pad_front = padding_val(2 * i);
+        auto pad_back  = padding_val(2 * i + 1);
+        ERROR_IF((pad_front < 0) || (pad_back < 0), "OpPad: padding can't be smaller than 0");
+        ERROR_IF(out->getShape()[i] != pad_front + in->getShape()[i] + pad_back,
+                 "OpPad: output shape not equal to input plus padding");
+        paddings_array[i] = std::make_pair(pad_front, pad_back);
+    }
+
     this->out->getTensor() = this->in->getTensor().pad(this->paddings_array, pad_value);
 
     return GraphNode::eval();
diff --git a/reference_model/src/ops/data_layout.h b/reference_model/src/ops/data_layout.h
index 9341709..e085b8e 100644
--- a/reference_model/src/ops/data_layout.h
+++ b/reference_model/src/ops/data_layout.h
@@ -54,14 +54,17 @@
     virtual int checkTensorAttributes();
     virtual int eval();
 
-    using InEigenType  = typename GetEigenType<Dtype>::type;
-    using OutEigenType = typename GetEigenType<Dtype>::type;
-    using TIn          = Eigen::Tensor<InEigenType, Rank>;
-    using TOut         = Eigen::Tensor<OutEigenType, Rank>;
+    using InEigenType      = typename GetEigenType<Dtype>::type;
+    using InEigenShapeType = typename GetEigenType<TOSA_REF_TYPE_SHAPE>::type;
+    using OutEigenType     = typename GetEigenType<Dtype>::type;
+    using TIn              = Eigen::Tensor<InEigenType, Rank>;
+    using TPadding         = Eigen::Tensor<InEigenShapeType, 1>;
+    using TOut             = Eigen::Tensor<OutEigenType, Rank>;
 
 protected:
     Eigen::array<std::pair<ptrdiff_t, ptrdiff_t>, Rank> paddings_array;
     TosaReference::TensorTemplate<TIn>* in;
+    TosaReference::TensorTemplate<TPadding>* padding;
     TosaReference::TensorTemplate<TOut>* out;
     TosaPadAttribute* attribute;
 };
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 386e243..0db9717 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -1046,6 +1046,19 @@
         )
 
     @staticmethod
+    def tvgPad(testGen, op, dtypeList, shapeList, argsDict, error_name=None):
+        # argsDict["pad"] is 2D array, need to flatten it to get list of values
+        pad_values = argsDict["pad"].flatten()
+        dtypeList[1] = DType.SHAPE
+        shapeList[1] = [len(pad_values)]
+        # Create a new list for the pre-generated data in argsDict["fixed_data"]
+        argsDict["fixed_data"] = [None, pad_values]
+
+        return TosaTensorValuesGen.tvgLazyGenDefault(
+            testGen, op, dtypeList, shapeList, argsDict, error_name
+        )
+
+    @staticmethod
     def tvgTile(testGen, op, dtypeList, shapeList, argsDict, error_name=None):
         dtypeList[1] = DType.SHAPE
         shapeList[1] = [len(argsDict["multiples"])]
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index a347b13..081aff6 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -1487,21 +1487,21 @@
         error_name=None,
         qinfo=None,
     ):
-        assert len(inputs) == 1
+        assert len(inputs) == 2
         a = inputs[0]
+        pad_input = inputs[1]
         padding = args_dict["pad"]
         pad_const_int = args_dict["pad_const_int"]
         pad_const_float = args_dict["pad_const_fp"]
 
         result_tensor = OutputShaper.padOp(self.ser, self.rng, a, padding, error_name)
 
+        # write empty padding into PadAttribute to ensure inputs[1] is used
         attr = ts.TosaSerializerAttribute()
-        attr.PadAttribute(
-            self.ser.builder, padding.flatten(), pad_const_int, pad_const_float
-        )
+        attr.PadAttribute(self.ser.builder, [], pad_const_int, pad_const_float)
 
         # Invalidate Input/Output list for error if checks.
-        input_list = [a.name]
+        input_list = [a.name, pad_input.name]
         output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
@@ -4275,11 +4275,11 @@
         },
         "pad": {
             "op": Op.PAD,
-            "operands": (1, 0),
+            "operands": (2, 0),
             "build_fcn": (
                 build_pad,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgLazyGenDefault,
+                TosaTensorValuesGen.tvgPad,
                 TosaArgGen.agPad,
             ),
             "types": TYPE_FIB,
@@ -5305,7 +5305,7 @@
 
         if error_name == ErrorIf.PadOutputShapeMismatch:
             bad_dim = rng.choice(range(len(output_shape)))
-            output_shape[bad_dim] -= rng.choice([1, 2])
+            output_shape[bad_dim] += rng.choice([1, 2])
         elif error_name == ErrorIf.RankMismatch:
             output_shape = gtu.get_rank_mismatch_shape(rng, output_shape)