Update to v0.22.0

- remove identityN and placeholder
- add div
- update serialization_lib hash
- update apply_scale_16() assertion
- regenerate examples/ due to serialization_lib change

Change-Id: I7183d92bec33697c65adfc07cb8eb89c6882675a
diff --git a/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa b/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa
index 7a195af..003d1bb 100644
--- a/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa
+++ b/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa
Binary files differ
diff --git a/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa b/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa
index c686131..864aaac 100644
--- a/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa
+++ b/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa
Binary files differ
diff --git a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
index 1fb5f17..ea794c8 100644
--- a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
+++ b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
Binary files differ
diff --git a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
index d1b740b..dfc7c9c 100644
--- a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
+++ b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
Binary files differ
diff --git a/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa b/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa
index 30bd194..1e21d49 100644
--- a/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa
+++ b/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa
Binary files differ
diff --git a/reference_model/src/ops/control_flow.cc b/reference_model/src/ops/control_flow.cc
index 827e01f..1a6a63a 100644
--- a/reference_model/src/ops/control_flow.cc
+++ b/reference_model/src/ops/control_flow.cc
@@ -93,6 +93,8 @@
             return 1;
         }
 
+        tensor->setIsValid();
+
         // Push ready consumers to the next node list
         for (auto gn : tensor->getConsumers())
         {
@@ -292,8 +294,7 @@
 int OpWhileLoop::eval()
 {
 
-    TosaReference::Tensor0<bool> cond_output_ctensor(std::string("cond_output"), DType_BOOL,
-                                                     std::vector<int32_t>({}));
+    TosaReference::Tensor0<bool> cond_output_ctensor(std::string("cond_output"), DType_BOOL, std::vector<int32_t>({}));
 
     cond_output_ctensor.allocate();
     std::vector<TosaReference::Tensor*> cond_block_outputs;
diff --git a/reference_model/src/ops/data_nodes.cc b/reference_model/src/ops/data_nodes.cc
index 883cd1b..baae019 100644
--- a/reference_model/src/ops/data_nodes.cc
+++ b/reference_model/src/ops/data_nodes.cc
@@ -42,29 +42,6 @@
     return GraphNode::eval();
 }
 
-OpPlaceholder::OpPlaceholder(uint64_t id_)
-    : GraphNode(Op_PLACEHOLDER, id_)
-{
-    setRequiredOperands(0, 1);
-}
-
-OpPlaceholder::~OpPlaceholder()
-{}
-
-int OpPlaceholder::checkTensorAttributes()
-{
-    if (validateRequiredOperands())
-        return 1;
-
-    return 0;
-}
-
-int OpPlaceholder::eval()
-{
-    // Evaluation is trivial for placeholders
-    return GraphNode::eval();
-}
-
 template <int Rank, DType Dtype>
 OpIdentity<Rank, Dtype>::OpIdentity(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
     : GraphNode(Op_IDENTITY, id_)
@@ -107,64 +84,11 @@
     return GraphNode::eval();
 }
 
-template <int Rank, DType Dtype>
-OpIdentityN<Rank, Dtype>::OpIdentityN(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_IDENTITYN, id_)
-{
-    setRequiredRank(0, 6);
-}
-
-template <int Rank, DType Dtype>
-OpIdentityN<Rank, Dtype>::~OpIdentityN()
-{}
-
-template <int Rank, DType Dtype>
-int OpIdentityN<Rank, Dtype>::checkTensorAttributes()
-{
-
-    if (inputs.size() != outputs.size())
-    {
-        printNodeValidationError("Input and output tensor list lengths are not equal");
-        return 1;
-    }
-
-    for (size_t i = 0; i < inputs.size(); i++)
-    {
-        ins.push_back(dynamic_cast<TosaReference::TensorTemplate<TIn>*>(inputs[i]));
-        outs.push_back(dynamic_cast<TosaReference::TensorTemplate<TOut>*>(outputs[i]));
-
-        if (ins[i]->matchRankTypeShape(*outs[i]))
-        {
-            printNodeValidationError("Input and output tensor rank, type, or shape do not match");
-            return 1;
-        }
-    }
-
-    return 0;
-}
-
-template <int Rank, DType Dtype>
-int OpIdentityN<Rank, Dtype>::eval()
-{
-    for (size_t i = 0; i < ins.size(); i++)
-    {
-        outs[i]->getTensor() = ins[i]->getTensor();
-    }
-
-    return GraphNode::eval();
-}
-
 // template explicit instantiation
-// note OpConst and OpPlaceholder are not templated
+// note OpConst is not templated
 
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, FLOAT);
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT8);
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT16);
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT32);
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, BOOL);
-
-DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, FLOAT);
-DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT8);
-DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT16);
-DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT32);
-DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, BOOL);
diff --git a/reference_model/src/ops/data_nodes.h b/reference_model/src/ops/data_nodes.h
index bec4669..a02d441 100644
--- a/reference_model/src/ops/data_nodes.h
+++ b/reference_model/src/ops/data_nodes.h
@@ -31,16 +31,6 @@
     virtual int eval();
 };
 
-class OpPlaceholder : public GraphNode
-{
-public:
-    OpPlaceholder(uint64_t id_);
-    virtual ~OpPlaceholder();
-
-    virtual int checkTensorAttributes();
-    virtual int eval();
-};
-
 template <int Rank, DType Dtype>
 class OpIdentity : public GraphNode
 {
@@ -61,26 +51,6 @@
     TosaReference::TensorTemplate<TOut>* out;
 };
 
-template <int Rank, DType Dtype>
-class OpIdentityN : public GraphNode
-{
-public:
-    OpIdentityN(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
-    virtual ~OpIdentityN();
-
-    virtual int checkTensorAttributes();
-    virtual int eval();
-
-    using InEigenType  = typename GetEigenType<Dtype>::type;
-    using OutEigenType = typename GetEigenType<Dtype>::type;
-    using TIn          = Eigen::Tensor<InEigenType, Rank>;
-    using TOut         = Eigen::Tensor<OutEigenType, Rank>;
-
-protected:
-    std::vector<TosaReference::TensorTemplate<TIn>*> ins;
-    std::vector<TosaReference::TensorTemplate<TOut>*> outs;
-};
-
 };    // namespace TosaReference
 
 #endif
diff --git a/reference_model/src/ops/ewise_binary.cc b/reference_model/src/ops/ewise_binary.cc
index fc587f1..76cebeb 100644
--- a/reference_model/src/ops/ewise_binary.cc
+++ b/reference_model/src/ops/ewise_binary.cc
@@ -298,6 +298,27 @@
 }
 
 template <int Rank, DType Dtype>
+int OpDiv<Rank, Dtype>::register_fcn()
+{
+    switch (InDtype)
+    {
+        case DType_INT32:
+            this->fcn = [this](InEigenType a, InEigenType b) -> OutEigenType {
+                ASSERT_MSG_NODE(b != 0, "OpDiv: divisor must be non-zero value");
+                int64_t res_in_64     = static_cast<int64_t>(a) / b;
+                int64_t i32_max_in_64 = static_cast<int64_t>(std::numeric_limits<InEigenType>::max());
+                ASSERT_MSG_NODE(res_in_64 <= i32_max_in_64, "OpDiv: result not in i32 range");
+                return static_cast<InEigenType>(res_in_64);
+            };
+            break;
+        default:
+            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]);
+    }
+
+    return 0;
+}
+
+template <int Rank, DType Dtype>
 int OpLogicalAnd<Rank, Dtype>::register_fcn()
 {
     switch (Dtype)
@@ -579,6 +600,8 @@
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT16);
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT32);
 
+DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpDiv, INT32);
+
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpLogicalAnd, BOOL);
 
 DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpLogicalLeftShift, INT8);
diff --git a/reference_model/src/ops/ewise_binary.h b/reference_model/src/ops/ewise_binary.h
index 5bc5630..6b9c98d 100644
--- a/reference_model/src/ops/ewise_binary.h
+++ b/reference_model/src/ops/ewise_binary.h
@@ -125,6 +125,7 @@
 DEF_TEMPLATE_BINARY_OP_DEFAULT(BitwiseAnd, BITWISE_AND)
 DEF_TEMPLATE_BINARY_OP_DEFAULT(BitwiseOr, BITWISE_OR)
 DEF_TEMPLATE_BINARY_OP_DEFAULT(BitwiseXor, BITWISE_XOR)
+DEF_TEMPLATE_BINARY_OP_DEFAULT(Div, DIV)
 DEF_TEMPLATE_BINARY_OP_DEFAULT(LogicalAnd, LOGICAL_AND)
 DEF_TEMPLATE_BINARY_OP_DEFAULT(LogicalLeftShift, LOGICAL_LEFT_SHIFT)
 DEF_TEMPLATE_BINARY_OP_DEFAULT(LogicalRightShift, LOGICAL_RIGHT_SHIFT)
diff --git a/reference_model/src/ops/op_factory.cc b/reference_model/src/ops/op_factory.cc
index b326c63..440d624 100644
--- a/reference_model/src/ops/op_factory.cc
+++ b/reference_model/src/ops/op_factory.cc
@@ -134,6 +134,9 @@
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT16);
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT32);
             break;
+        case Op_DIV:
+            DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpDiv, INT32);
+            break;
         case Op_LOGICAL_AND:
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpLogicalAnd, BOOL);
             break;
@@ -346,8 +349,6 @@
         // data_nodes
         case Op_CONST:
             return new OpConst(id);
-        case Op_PLACEHOLDER:
-            return new OpPlaceholder(id);
         case Op_IDENTITY:
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, FLOAT);
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT32);
@@ -355,13 +356,6 @@
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT16);
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, BOOL);
             break;
-        case Op_IDENTITYN:
-            DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, FLOAT);
-            DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT32);
-            DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT8);
-            DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT16);
-            DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, BOOL);
-            break;
 
         // type_conversion
         case Op_CAST:
diff --git a/reference_model/src/quant_util.h b/reference_model/src/quant_util.h
index f07dd10..c595869 100644
--- a/reference_model/src/quant_util.h
+++ b/reference_model/src/quant_util.h
@@ -65,8 +65,8 @@
     static int32_t apply_scale_16(int64_t value, int16_t multiplier, int32_t shift)
     {
         ASSERT_MSG(multiplier >= 0, "apply_scale_16() error: multiplier should >= 0 but is %d", multiplier);
-        ASSERT_MSG(value >= -(static_cast<int64_t>(1) << 47) && value < (static_cast<int64_t>(1) << 47),
-                   "apply_scale_16() error: value should be within [-(1^47), 1^47]");
+        ASSERT_MSG(shift >= 2 && shift <= 62, "apply_scale_16() error: shift should be within [2, 62] but is %d",
+                   shift);
         int64_t round  = 1L << (shift - 1);
         int64_t result = value * (int64_t)multiplier + round;
         result         = result >> shift;
diff --git a/reference_model/src/subgraph_traverser.cc b/reference_model/src/subgraph_traverser.cc
index 5096ffa..1995b5c 100644
--- a/reference_model/src/subgraph_traverser.cc
+++ b/reference_model/src/subgraph_traverser.cc
@@ -499,7 +499,6 @@
     // For each node, read this list, link up the tensors with their inputs/outputs
     for (GraphNode* currNode : nodes)
     {
-
         // Link inputs/consuming nodes
         for (std::string& name : currNode->getInputNames())
         {
@@ -566,36 +565,18 @@
     for (TosaReference::Tensor* currTensor : tensors)
     {
 
-        if (!currTensor->getProducer() && currTensor->getConsumers().empty())
+        // It's okay for block input tensor not being consumed by operators.
+        // This is common in control flow op execution.
+        if (!currTensor->getIsSubgraphInput())
         {
-            WARNING("Graph inconsistency: TosaReference::Tensor %s has no producers or consumers\n",
-                    currTensor->getName().c_str());
-            return 1;
-        }
-
-        if (currTensor->getIsSubgraphInput())
-        {
-            if (currTensor->getProducer() && currTensor->getProducer()->getOp() != Op_PLACEHOLDER)
+            if (!currTensor->getProducer() && currTensor->getConsumers().empty())
             {
-                WARNING("Graph inconsistency: TosaReference::Tensor %s is a subgraph input and has a producer\n",
+                WARNING("Graph inconsistency: TosaReference::Tensor %s has no producers or consumers\n",
                         currTensor->getName().c_str());
                 return 1;
             }
         }
 
-        // comment this check out as this is possible when graph have multiple output
-        // for example:
-        //   %0 = add(%arg0, %arg1)
-        //   %1 = mul(%arg0, %0)
-        //   yields(%0, %1)
-        //if (currTensor->getIsSubgraphOutput()) {
-        //    if (!currTensor->getConsumers().empty()) {
-        //        WARNING ("Graph inconsistency: TosaReference::Tensor %s is a subgraph output and has a consumer\n",
-        //                     currTensor->getName().c_str());
-        //        return 1;
-        //    }
-        //}
-
         if (g_func_config.tosa_profile == 0)
         {
             DType dtype = currTensor->getDtype();
diff --git a/thirdparty/serialization_lib b/thirdparty/serialization_lib
index 2364dcd..a8b4eaf 160000
--- a/thirdparty/serialization_lib
+++ b/thirdparty/serialization_lib
@@ -1 +1 @@
-Subproject commit 2364dcd7241d730021bf68e000e5a6411b9f09d1
+Subproject commit a8b4eafda31fe41b99a46c09c131ac7295382570
diff --git a/verif/tosa_serializer.py b/verif/tosa_serializer.py
index 726ffc4..5ed9877 100644
--- a/verif/tosa_serializer.py
+++ b/verif/tosa_serializer.py
@@ -548,8 +548,6 @@
         tens = self.currBasicBlock.addTensor(name, shape, dtype, None, filename)
         # This is always an input to the block
         self.currBasicBlock.addInput(name)
-        # Add the operator now
-        self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], name)
 
         if vals is not None:
             np.save(os.path.join(self.pathPrefix, filename), vals, False)
@@ -586,7 +584,6 @@
         return tens
 
     def addInputTensor(self, tensor):
-        self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], tensor.name)
         self.currBasicBlock.addTensor(tensor.name, tensor.shape, tensor.dtype)
         self.currBasicBlock.addInput(tensor.name)
 
@@ -606,10 +603,8 @@
 
     def addOperator(self, op, inputs, outputs, attributes=None, quant_info=None):
 
-        if op == tosa.Op.Op().PLACEHOLDER or op == tosa.Op.Op().CONST:
-            raise Exception(
-                "Use addPlaceholderTensor() or addConstTensor() to add PLACEHOLDER and CONST ops"
-            )
+        if op == tosa.Op.Op().CONST:
+            raise Exception("Use addConstTensor() to add CONST ops")
 
         return self.currBasicBlock.addOperator(
             op, inputs, outputs, attributes, quant_info
diff --git a/verif/tosa_test_gen.py b/verif/tosa_test_gen.py
index 7731a75..bc97f15 100644
--- a/verif/tosa_test_gen.py
+++ b/verif/tosa_test_gen.py
@@ -1710,10 +1710,101 @@
                     else:
                         raise Exception("OpArithmeticRightShift: invalid input dtype")
                 else:
-                    arr = self.getRandTensor(shapeList[0], dtypeList[idx])
+                    arr = self.getRandTensor(shape, dtypeList[idx])
                 placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))
 
             tens.extend(placeholders)
+        elif op["op"] == Op.DIV:
+            assert (
+                pCount == 2 and cCount == 0
+            ), "Op.Div must have 2 placeholders, 0 consts"
+
+            placeholders = []
+
+            # Two invalid cases for Op.DIV:
+            # 1. divisor == 0
+            # 2. dividend == -(1<<31) and divisor == -1 (quotient overflows int32)
+            while True:
+                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
+                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])
+
+                if (divisor_arr == 0).any():
+                    continue
+
+                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
+                    continue
+
+                break
+
+            placeholders.append(
+                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
+            )
+            placeholders.append(
+                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
+            )
+
+            tens.extend(placeholders)
+        elif op["op"] == Op.MUL:
+            assert (
+                pCount == 2 and cCount == 0
+            ), "Op.MUL must have 2 placeholders, 0 consts"
+
+            if dtypeList[0] == DType.FLOAT:
+                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
+            else:
+                placeholders = []
+
+                # Make sure multiply result in int32 range
+                shift = testArgs[0]
+                if dtypeList[0] == DType.INT8:
+                    num_bits = 8
+                elif dtypeList[0] == DType.INT16:
+                    num_bits = 16
+                elif dtypeList[0] == DType.INT32:
+                    num_bits = 32
+                else:
+                    raise Exception("OpMul: invalid input dtype")
+
+                for idx, shape in enumerate(shapeList[:]):
+                    low = -(2 ** (num_bits - 1))
+                    high = (2 ** (num_bits - 1)) - 1
+
+                    a_arr = np.int32(
+                        self.rng.integers(low=low, high=high, size=shapeList[0])
+                    )
+                    b_arr = np.int32(
+                        self.rng.integers(low=low, high=high, size=shapeList[1])
+                    )
+
+                i = 0
+                while True:
+
+                    a_arr_64 = a_arr.astype(np.int64)
+                    b_arr_64 = b_arr.astype(np.int64)
+
+                    if shift > 0:
+                        rounding = 1 << (shift - 1)
+                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
+                    else:
+                        result_arr = a_arr_64 * b_arr_64
+
+                    if (result_arr > -(2 ** 31)).all() and (
+                        result_arr <= ((2 ** 31) - 1)
+                    ).all():
+                        break
+
+                    i = i + 1
+                    a_arr = a_arr // 2
+                    b_arr = b_arr // 2
+
+                placeholders.append(
+                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
+                )
+                placeholders.append(
+                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
+                )
+
+                tens.extend(placeholders)
         else:
             tens.extend(
                 self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
@@ -1858,7 +1949,6 @@
             "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_NARROW_INT_FP,
         },
-
         "avg_pool2d": {
             "op": Op.AVG_POOL2D,
             "operands": (1, 0),
@@ -1867,7 +1957,6 @@
             "qgen": TosaQuantGen.qgUnary,
             "types": TYPE_NARROW_INT_FP,
         },
-
         # Templated operator.  Filled in by createDynamicOpLists
         "conv2d_TEMPLATE": {
             "op": Op.CONV2D,
@@ -1878,9 +1967,7 @@
             "types": TYPE_CONV2D,
             "template": True,
         },
-
         # Conv3d TBD
-
         # Templated operator.  Filled in by createDynamicOpLists
         "depthwise_conv2d_TEMPLATE": {
             "op": Op.DEPTHWISE_CONV2D,
@@ -1896,7 +1983,6 @@
             "types": TYPE_CONV2D,
             "template": True,
         },
-
         "fully_connected": {
             "op": Op.FULLY_CONNECTED,
             "operands": (1, 2),
@@ -1905,7 +1991,6 @@
             "qgen": TosaQuantGen.qgConv,
             "types": TYPE_CONV2D,
         },
-
         "matmul": {
             "op": Op.MATMUL,
             "operands": (2, 0),
@@ -1914,7 +1999,6 @@
             "qgen": TosaQuantGen.qgMatmul,
             "types": TYPE_NARROW_INT_FP,
         },
-
         "max_pool2d": {
             "op": Op.MAX_POOL2D,
             "operands": (1, 0),
@@ -1922,7 +2006,6 @@
             "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
             "types": TYPE_NARROW_INT_FP,
         },
-
         # Templated operator.  Filled in by createDynamicOpLists
         "transpose_conv2d_TEMPLATE": {
             "op": Op.TRANSPOSE_CONV2D,
@@ -1937,7 +2020,6 @@
             "types": TYPE_CONV2D,
             "template": True,
         },
-
         # Activation functions
         "clamp": {
             "op": Op.CLAMP,
@@ -1945,28 +2027,24 @@
             "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
             "types": TYPE_NARROW_INT_FP,
         },
-
         "relun": {
             "op": Op.RELUN,
             "operands": (1, 0),
             "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
             "types": TYPE_FI32,
         },
-
         "sigmoid": {
             "op": Op.SIGMOID,
             "operands": (1, 0),
             "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "tanh": {
             "op": Op.TANH,
             "operands": (1, 0),
             "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         # Elementwise Binary Operators
         "add": {
             "op": Op.ADD,
@@ -1974,7 +2052,6 @@
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         "arithmetic_right_shift": {
             "op": Op.ARITHMETIC_RIGHT_SHIFT,
             "operands": (2, 0),
@@ -1985,98 +2062,90 @@
             ),
             "types": TYPE_INT,
         },
-
         "bitwise_and": {
             "op": Op.BITWISE_AND,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_INT,
         },
-
         "bitwise_or": {
             "op": Op.BITWISE_OR,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_INT,
         },
-
         "bitwise_xor": {
             "op": Op.BITWISE_XOR,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_INT,
         },
-
+        "div": {
+            "op": Op.DIV,
+            "operands": (2, 0),
+            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
+            "types": [DType.INT32],
+        },
         "logical_and": {
             "op": Op.LOGICAL_AND,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_BOOL,
         },
-
         "logical_left_shift": {
             "op": Op.LOGICAL_LEFT_SHIFT,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_INT,
         },
-
         "logical_right_shift": {
             "op": Op.LOGICAL_RIGHT_SHIFT,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_INT,
         },
-
         "logical_or": {
             "op": Op.LOGICAL_OR,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_BOOL,
         },
-
         "logical_xor": {
             "op": Op.LOGICAL_XOR,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_BOOL,
         },
-
         "maximum": {
             "op": Op.MAXIMUM,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         "minimum": {
             "op": Op.MINIMUM,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         "mul": {
             "op": Op.MUL,
             "operands": (2, 0),
             "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
             "types": TYPE_INT_FP,
         },
-
         "pow": {
             "op": Op.POW,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "sub": {
             "op": Op.SUB,
             "operands": (2, 0),
             "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         "table": {
             "op": Op.TABLE,
             # Use the automatic generation functions to create the input array
@@ -2086,7 +2155,6 @@
             "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
             "types": [DType.INT16],
         },
-
         # Elementwise Unary operators
         "abs": {
             "op": Op.ABS,
@@ -2094,56 +2162,48 @@
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FI32,
         },
-
         "bitwise_not": {
             "op": Op.BITWISE_NOT,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_INT,
         },
-
         "ceil": {
             "op": Op.CEIL,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "clz": {
             "op": Op.CLZ,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": [DType.INT32],
         },
-
         "exp": {
             "op": Op.EXP,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "floor": {
             "op": Op.FLOOR,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "log": {
             "op": Op.LOG,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "logical_not": {
             "op": Op.LOGICAL_NOT,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_BOOL,
         },
-
         "negate": {
             "op": Op.NEGATE,
             "operands": (1, 0),
@@ -2151,21 +2211,18 @@
             "qgen": TosaQuantGen.qgUnary,
             "types": TYPE_INT_FP,
         },
-
         "reciprocal": {
             "op": Op.RECIPROCAL,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         "rsqrt": {
             "op": Op.RSQRT,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FP,
         },
-
         # Elementwise Ternary operators
         "select": {
             "op": Op.SELECT,
@@ -2173,7 +2230,6 @@
             "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FIB,
         },
-
         # Comparison operators
         "equal": {
             "op": Op.EQUAL,
@@ -2181,21 +2237,18 @@
             "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         "greater_equal": {
             "op": Op.GREATER_EQUAL,
             "operands": (2, 0),
             "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         "greater": {
             "op": Op.GREATER,
             "operands": (2, 0),
             "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
             "types": TYPE_FI32,
         },
-
         # Reduction operators
         "reduce_all": {
             "op": Op.REDUCE_ALL,
@@ -2203,42 +2256,36 @@
             "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_BOOL,
         },
-
         "reduce_any": {
             "op": Op.REDUCE_ANY,
             "operands": (1, 0),
             "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_BOOL,
         },
-
         "reduce_max": {
             "op": Op.REDUCE_MAX,
             "operands": (1, 0),
             "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_INT_FP,
         },
-
         "reduce_min": {
             "op": Op.REDUCE_MAX,
             "operands": (1, 0),
             "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_INT_FP,
         },
-
         "reduce_product": {
             "op": Op.REDUCE_PRODUCT,
             "operands": (1, 0),
             "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_FP,
         },
-
         "reduce_sum": {
             "op": Op.REDUCE_SUM,
             "operands": (1, 0),
             "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_FI32,
         },
-
         # Data layout operators
         "concat": {
             "op": Op.CONCAT,
@@ -2246,7 +2293,6 @@
             "build_fcn": (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_FIB,
         },
-
         "pad": {
             "op": Op.PAD,
             "operands": (1, 0),
@@ -2254,35 +2300,30 @@
             "qgen": TosaQuantGen.qgPad,
             "types": TYPE_FIB,
         },
-
         "reshape": {
             "op": Op.RESHAPE,
             "operands": (1, 0),
             "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
             "types": TYPE_FIB,
         },
-
         "reverse": {
             "op": Op.REVERSE,
             "operands": (1, 0),
             "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
             "types": TYPE_FIB,
         },
-
         "slice": {
             "op": Op.SLICE,
             "operands": (1, 0),
             "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
             "types": TYPE_FIB,
         },
-
         "tile": {
             "op": Op.TILE,
             "operands": (1, 0),
             "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
             "types": TYPE_FIB,
         },
-
         "transpose": {
             "op": Op.TRANSPOSE,
             "operands": (1, 0),
@@ -2294,7 +2335,6 @@
             ),
             "types": TYPE_FIB,
         },
-
         # Data nodes
         "const": {
             "op": Op.CONST,
@@ -2302,28 +2342,12 @@
             "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
             "types": TYPE_FIB,
         },
-
         "identity": {
             "op": Op.IDENTITY,
             "operands": (1, 0),
             "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
             "types": TYPE_FIB,
         },
-
-        "identityn": {
-            "op": Op.IDENTITYN,
-            "operands": (2, 0),
-            "build_fcn": (build_identityn, TosaTensorGen.tgBasic, None),
-            "types": TYPE_FIB,
-        },
-
-        "placeholder": {
-            "op": Op.PLACEHOLDER,
-            "operands": (1, 0),
-            "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
-            "types": TYPE_FIB,
-        },
-
         # Scatter/Gather
         "gather": {
             "op": Op.GATHER,
@@ -2333,7 +2357,6 @@
             "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
             "types": TYPE_INT_FP,
         },
-
         "scatter": {
             "op": Op.SCATTER,
             # Only specify 'values_in' tensor here.
@@ -2343,7 +2366,6 @@
             "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
             "types": TYPE_INT_FP,
         },
-
         # Image operations
         "resize": {
             "op": Op.RESIZE,
@@ -2352,7 +2374,6 @@
             "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
             "types": [DType.INT8, DType.INT16, DType.FLOAT],
         },
-
         # Type conversion
         "cast": {
             "op": Op.CAST,
@@ -2360,18 +2381,14 @@
             "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
             "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
         },
-
         "rescale": {
             "op": Op.RESCALE,
             "operands": (1, 0),
             "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
             "types": [DType.INT8, DType.INT16, DType.INT32, DType.INT48],
         },
-
         # Custom
         # Not implemented.
-
-
         # Control flow operators
         # Two varients of cond_if, one that generates one of two constant tensors (no
         # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors