Changes for 0.23.0 release

- update serialization_lib hash
- PAD:
    1. make padding an attribute instead of a tensor.
    2. add pad_const_int (for non-float types) / pad_const_fp (for float types)
- TRANSPOSE: make perm an attribute instead of a tensor
- TABLE: make table an attribute instead of a tensor
- update examples/ tests

Signed-off-by: Kevin Cheng <kevin.cheng@arm.com>
Change-Id: Iddc446db4b356ba2f36ea4a79b7220b9cfc2aa4e
diff --git a/reference_model/src/ops/data_layout.cc b/reference_model/src/ops/data_layout.cc
index 674227b..05a11e0 100644
--- a/reference_model/src/ops/data_layout.cc
+++ b/reference_model/src/ops/data_layout.cc
@@ -128,10 +128,11 @@
                           uint64_t id_)
     : GraphNode(sgt_, Op_PAD, id_)
 {
-    setRequiredOperands(2, 1);
+    setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
 
     INIT_QINFO(Pad);
+    INIT_ATTRIBUTE(Pad);
 }
 
 template <int Rank, DType Dtype>
@@ -159,9 +160,22 @@
         return 1;
     }
 
-    in       = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
-    out      = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
-    paddings = dynamic_cast<TosaReference::TensorTemplate<ETensor2<int32_t>>*>(inputs[1]);
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+    ASSERT_MEM(in && out);
+
+    // padding in spec is 2D array in shape of [Rank, 2]
+    // Reference model implement this as 1D array of [Rank * 2], with ordering:
+    // [Rank0_front, Rank0_back, Rank1_front, Rank1_back, ..., Rank(N-1)_front, Rank(N-1)_back]
+    ERROR_IF(attribute->padding().size() != (Rank * 2), "OpPad: padding length needs to be (rank(input1) * 2)");
+
+    for (int i = 0; i < Rank; i++)
+    {
+        int32_t pad_front = attribute->padding()[2 * i];
+        int32_t pad_back  = attribute->padding()[2 * i + 1];
+        ERROR_IF((pad_front < 0) || (pad_back < 0), "OpPad: padding can't be smaller than 0");
+        paddings_array[i] = std::make_pair(pad_front, pad_back);
+    }
 
     if (this->qinfo && Dtype != DType_INT8)
     {
@@ -174,18 +188,24 @@
 template <int Rank, DType Dtype>
 int OpPad<Rank, Dtype>::eval()
 {
-    // Move this to
-    for (int i = 0; i < Rank; i++)
+    InEigenType pad_value = 0;
+
+    switch (Dtype)
     {
-        ERROR_IF((paddings->getTensor()(i, 0) < 0) || (paddings->getTensor()(i, 1) < 0),
-                 "OpPad: padding can't be smaller than 0");
-        paddings_array[i] = std::make_pair(paddings->getTensor()(i, 0), paddings->getTensor()(i, 1));
+        case DType_BOOL:
+        case DType_INT8:
+        case DType_INT16:
+        case DType_INT32:
+            pad_value = (InEigenType)attribute->pad_const_int();
+            break;
+        case DType_FLOAT:
+            pad_value = (InEigenType)attribute->pad_const_fp();
+            break;
     }
 
-    InEigenType pad_value = 0;
-    if (this->qinfo)
+    if (this->qinfo && Dtype == DType_INT8)
     {
-        pad_value = (InEigenType)this->qinfo->input_zp();
+        pad_value += (InEigenType)this->qinfo->input_zp();
     }
 
     this->out->getTensor() = this->in->getTensor().pad(this->paddings_array, pad_value);
@@ -602,8 +622,10 @@
                                       uint64_t id_)
     : GraphNode(sgt_, Op_TRANSPOSE, id_)
 {
-    setRequiredOperands(2, 1);
+    setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
+
+    INIT_ATTRIBUTE(Transpose);
 }
 
 template <int Rank, DType Dtype>
@@ -634,9 +656,10 @@
         return 1;
     }
 
-    in          = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
-    out         = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
-    perm_tensor = dynamic_cast<TosaReference::TensorTemplate<ETensor1<int32_t>>*>(inputs[1]);
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
+
+    ASSERT_MEM(in && out);
 
     return 0;
 }
@@ -646,7 +669,7 @@
 {
     for (int32_t d = 0; d < Rank; d++)
     {
-        perm_array[d] = this->perm_tensor->getTensor().data()[d];
+        perm_array[d] = attribute->perm()[d];
         ERROR_IF(perm_array[d] < 0 or perm_array[d] >= Rank, "OpTranspose: index out of boundary");
     }
 
diff --git a/reference_model/src/ops/data_layout.h b/reference_model/src/ops/data_layout.h
index 9f44fc7..bad88e4 100644
--- a/reference_model/src/ops/data_layout.h
+++ b/reference_model/src/ops/data_layout.h
@@ -63,8 +63,8 @@
     Eigen::array<std::pair<ptrdiff_t, ptrdiff_t>, Rank> paddings_array;
     TosaReference::TensorTemplate<TIn>* in;
     TosaReference::TensorTemplate<TOut>* out;
-    TosaReference::TensorTemplate<Eigen::Tensor<int32_t, 2>>* paddings;
     TosaPadQuantInfo* qinfo;
+    TosaPadAttribute* attribute;
 };
 
 template <int InRank, int OutRank, DType Dtype>
@@ -207,8 +207,8 @@
 
 protected:
     Eigen::array<int, Rank> perm_array;
+    TosaTransposeAttribute* attribute;
     TosaReference::TensorTemplate<TIn>* in;
-    TosaReference::TensorTemplate<ETensor1<int32_t>>* perm_tensor;
     TosaReference::TensorTemplate<TOut>* out;
 };
 };    // namespace TosaReference
diff --git a/reference_model/src/ops/ewise_binary.cc b/reference_model/src/ops/ewise_binary.cc
index 6808604..415cd1c 100644
--- a/reference_model/src/ops/ewise_binary.cc
+++ b/reference_model/src/ops/ewise_binary.cc
@@ -490,8 +490,10 @@
                                 uint64_t id_)
     : GraphNode(sgt_, Op_TABLE, id_)
 {
-    setRequiredOperands(2, 1);
+    setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
+
+    INIT_ATTRIBUTE(Table);
 }
 
 template <int Rank, DType InDtype>
@@ -509,36 +511,18 @@
         return 1;
     }
 
-    if (inputs[1]->getRank() != 1)
+    ERROR_IF(inputs[0]->getDtype() != InDtype, "OpTable: Unexpected input type");
+    ERROR_IF(attribute->table().size() != TableNumEntries, "OpTable: table attribute size must be %u", TableNumEntries);
+
+    for (uint32_t i = 0; i < TableNumEntries; i++)
     {
-        printNodeValidationError("OpTable: Table must be rank 1 tensor");
-        return 1;
+        table[i] = (TableEigenType)attribute->table()[i];
     }
 
-    if (inputs[0]->getDtype() == DType_INT8)
-    {
-        if (inputs[1]->getElementCount() != 256 || inputs[1]->getDtype() != DType_INT8)
-        {
-            printNodeValidationError("OpTable: Table must be INT8[256] if input is INT8");
-            return 1;
-        }
-        ERROR_IF(outputs[0]->getDtype() != DType_INT8, "OpTable: output tensor must be INT8");
-    }
-    else if (inputs[0]->getDtype() == DType_INT16)
-    {
-        if (inputs[1]->getElementCount() != 513 || inputs[1]->getDtype() != DType_INT16)
-        {
-            printNodeValidationError("OpTable: Table must be INT16[513] if input is INT16");
-            return 1;
-        }
-        ERROR_IF(outputs[0]->getDtype() != DType_INT32, "OpTable: output tensor must be INT32");
-    }
+    in  = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
+    out = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
 
-    in    = dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[0]);
-    table = dynamic_cast<TosaReference::TensorTemplate<TTable>*>(inputs[1]);
-    out   = dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[0]);
-
-    ASSERT_MEM(in && table && out);
+    ASSERT_MEM(in && out);
 
     return 0;
 }
@@ -552,7 +536,7 @@
             this->out->getTensor() = this->in->getTensor().unaryExpr([this](InEigenType in) -> OutEigenType {
                 int32_t input_truncated = std::min<int32_t>(std::max<int32_t>(in, QInMin), QInMax);
                 int32_t index           = input_truncated - QInMin;
-                int32_t value           = this->table->getTensor()(index);
+                int32_t value           = table[index];
 
                 return value;
             });
@@ -568,8 +552,8 @@
                 int32_t frac  = (input_truncated)&0x7F;    // 7-bit fraction
 
                 // 3. interpolate, generate 16.7 (23-bit) output
-                int32_t base  = this->table->getTensor()(index);
-                int32_t next  = this->table->getTensor()(index + 1);
+                int32_t base  = table[index];
+                int32_t next  = table[index + 1];
                 int32_t value = (base << 7) + (next - base) * frac;
 
                 return value;
diff --git a/reference_model/src/ops/ewise_binary.h b/reference_model/src/ops/ewise_binary.h
index fd4d408..373dfb8 100644
--- a/reference_model/src/ops/ewise_binary.h
+++ b/reference_model/src/ops/ewise_binary.h
@@ -184,26 +184,28 @@
     virtual int checkTensorAttributes();
     virtual int eval();
 
-    static constexpr DType TableDtype        = (InDtype == DType_INT8) ? DType_INT8 : DType_INT16;
-    static constexpr DType OutDtype          = (InDtype == DType_INT8) ? DType_INT8 : DType_INT32;
-    using InEigenType                        = typename GetEigenType<InDtype>::type;
-    using TableEigenType                     = typename GetEigenType<TableDtype>::type;
-    using OutEigenType                       = typename GetEigenType<OutDtype>::type;
-    using TIn                                = Eigen::Tensor<InEigenType, Rank>;
-    using TTable                             = Eigen::Tensor<TableEigenType, 1>;
-    using TOut                               = Eigen::Tensor<OutEigenType, Rank>;
-    static constexpr int32_t IntegerBits     = 9;
-    static constexpr int32_t FractionBits    = 7;
-    static constexpr int32_t NumTableEntries = (1 << IntegerBits);
-    static constexpr int32_t QInMin          = GetQMin<InDtype>::value;
-    static constexpr int32_t QInMax          = GetQMax<InDtype>::value;
-    static constexpr int32_t QOutMin         = GetQMin<OutDtype>::value;
-    static constexpr int32_t QOutMax         = GetQMax<OutDtype>::value;
+    static constexpr DType TableDtype         = (InDtype == DType_INT8) ? DType_INT8 : DType_INT16;
+    static constexpr DType OutDtype           = (InDtype == DType_INT8) ? DType_INT8 : DType_INT32;
+    static constexpr uint32_t TableNumEntries = (InDtype == DType_INT8) ? 256 : 513;
+    using InEigenType                         = typename GetEigenType<InDtype>::type;
+    using TableEigenType                      = typename GetEigenType<TableDtype>::type;
+    using OutEigenType                        = typename GetEigenType<OutDtype>::type;
+    using TIn                                 = Eigen::Tensor<InEigenType, Rank>;
+    using TTable                              = Eigen::Tensor<TableEigenType, 1>;
+    using TOut                                = Eigen::Tensor<OutEigenType, Rank>;
+    static constexpr int32_t IntegerBits      = 9;
+    static constexpr int32_t FractionBits     = 7;
+    static constexpr int32_t NumTableEntries  = (1 << IntegerBits);
+    static constexpr int32_t QInMin           = GetQMin<InDtype>::value;
+    static constexpr int32_t QInMax           = GetQMax<InDtype>::value;
+    static constexpr int32_t QOutMin          = GetQMin<OutDtype>::value;
+    static constexpr int32_t QOutMax          = GetQMax<OutDtype>::value;
 
 protected:
     TosaReference::TensorTemplate<TIn>* in;
-    TosaReference::TensorTemplate<TTable>* table;
     TosaReference::TensorTemplate<TOut>* out;
+    TosaTableAttribute* attribute;
+    std::array<TableEigenType, TableNumEntries> table;
 };
 
 };    // namespace TosaReference