Replace node-level checks ASSERT_MSG_NODE()/FATAL_ERROR_NODE() with REQUIRE() or ERROR_IF()

- Add a return code enum class: {VALID, UNPREDICTABLE, ERROR}
- Runtime errors (e.g. memory allocation failure) abort immediately; other failed checks return one of the three return codes.
  Parts of the code are rewritten so that REQUIRE() failures are passed up to the top level (e.g. apply_scale_32/16()); a rough sketch of this flow follows below.
- Update setExpectedFailure() to setExpectedReturnCode() in the test generation script
- Update the test regression script to interface with the reference model changes
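
Rough sketch of how the return codes and the REQUIRE()/ERROR_IF() checks are intended
to behave. GraphStatus, g_status, the macro bodies, apply_scale_sketch() and
register_fcn_sketch() below are illustrative assumptions only; the real macros in the
reference model also record which node failed via the SubgraphTraverser.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the new return code enum class described above; the
    // GraphStatus name is assumed for illustration.
    enum class GraphStatus
    {
        VALID,
        UNPREDICTABLE,
        ERROR
    };

    static GraphStatus g_status = GraphStatus::VALID;

    // ERROR_IF: a spec-level error is recorded and returned to the caller
    // instead of aborting the whole process (illustrative macro body).
    #define ERROR_IF(COND, ...)                          \
        do                                               \
        {                                                \
            if (COND)                                    \
            {                                            \
                fprintf(stderr, __VA_ARGS__);            \
                fprintf(stderr, "\n");                   \
                g_status = GraphStatus::ERROR;           \
                return 1;                                \
            }                                            \
        } while (0)

    // REQUIRE: a data-dependent check; a failure marks the result
    // UNPREDICTABLE and is propagated to the top level (illustrative).
    #define REQUIRE(COND, ...)                           \
        do                                               \
        {                                                \
            if (!(COND))                                 \
            {                                            \
                fprintf(stderr, __VA_ARGS__);            \
                fprintf(stderr, "\n");                   \
                g_status = GraphStatus::UNPREDICTABLE;   \
                return 1;                                \
            }                                            \
        } while (0)

    // Hypothetical helper mirroring the apply_scale_32/16() pattern: the
    // shift-range check now returns a status instead of asserting.
    int apply_scale_sketch(int64_t value, int32_t shift, int64_t& result)
    {
        REQUIRE(shift >= 2 && shift <= 62, "apply_scale: shift %d out of range", (int)shift);
        result = value >> shift;
        return 0;
    }

    // Mirrors the diff below: an unsupported dtype becomes a returned ERROR,
    // not a fatal abort.
    int register_fcn_sketch(int dtype)
    {
        ERROR_IF(dtype != 0, "unsupported DType %d", dtype);
        return 0;
    }

    int main()
    {
        int64_t out = 0;
        if (register_fcn_sketch(0) != 0 || apply_scale_sketch(1024, 70, out) != 0)
        {
            // The top level maps the recorded status onto the process exit
            // code, which the updated test scripts compare against.
            return static_cast<int>(g_status);
        }
        printf("result = %lld\n", (long long)out);
        return static_cast<int>(g_status);
    }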

Signed-off-by: Kevin Cheng <kevin.cheng@arm.com>
Change-Id: Ia063c936bcb2a54d6e379a5bb6801aa72d1186f1
diff --git a/reference_model/src/ops/activation_funcs.cc b/reference_model/src/ops/activation_funcs.cc
index 3410ba9..440f4e1 100644
--- a/reference_model/src/ops/activation_funcs.cc
+++ b/reference_model/src/ops/activation_funcs.cc
@@ -44,7 +44,7 @@
         }
         break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -69,7 +69,7 @@
         }
         break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -84,7 +84,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return (1.0 / (1.0 + (expf(-1.0 * a)))); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -99,7 +99,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return tanhf(a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
diff --git a/reference_model/src/ops/activation_funcs.h b/reference_model/src/ops/activation_funcs.h
index b051b9d..c834b52 100644
--- a/reference_model/src/ops/activation_funcs.h
+++ b/reference_model/src/ops/activation_funcs.h
@@ -28,8 +28,8 @@
 class OpClamp : public UnaryNode<Rank, Dtype>
 {
 public:
-    OpClamp(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : UnaryNode<Rank, Dtype>(Op_CLAMP, id_)
+    OpClamp(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : UnaryNode<Rank, Dtype>(sgt_, Op_CLAMP, id_)
     {
         INIT_ATTRIBUTE(Clamp);
         register_fcn();
@@ -48,8 +48,8 @@
 class OpReluN : public UnaryNode<Rank, Dtype>
 {
 public:
-    OpReluN(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : UnaryNode<Rank, Dtype>(Op_RELUN, id_)
+    OpReluN(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : UnaryNode<Rank, Dtype>(sgt_, Op_RELUN, id_)
     {
         INIT_ATTRIBUTE(ReluN);
         register_fcn();
@@ -68,8 +68,8 @@
 class OpSigmoid : public UnaryNode<Rank, Dtype>
 {
 public:
-    OpSigmoid(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : UnaryNode<Rank, Dtype>(Op_SIGMOID, id_)
+    OpSigmoid(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : UnaryNode<Rank, Dtype>(sgt_, Op_SIGMOID, id_)
     {
         register_fcn();
     }
@@ -84,8 +84,8 @@
 class OpTanh : public UnaryNode<Rank, Dtype>
 {
 public:
-    OpTanh(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : UnaryNode<Rank, Dtype>(Op_TANH, id_)
+    OpTanh(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : UnaryNode<Rank, Dtype>(sgt_, Op_TANH, id_)
     {
         register_fcn();
     }
diff --git a/reference_model/src/ops/comparison.cc b/reference_model/src/ops/comparison.cc
index 402e152..ab89e24 100644
--- a/reference_model/src/ops/comparison.cc
+++ b/reference_model/src/ops/comparison.cc
@@ -32,7 +32,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a == b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -48,7 +48,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a > b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -64,7 +64,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a >= b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
diff --git a/reference_model/src/ops/comparison.h b/reference_model/src/ops/comparison.h
index e75b1a6..5b4d0f1 100644
--- a/reference_model/src/ops/comparison.h
+++ b/reference_model/src/ops/comparison.h
@@ -28,8 +28,8 @@
 class OpEqual : public BinaryNode<Rank, Dtype, DType_BOOL>
 {
 public:
-    OpEqual(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : BinaryNode<Rank, Dtype, DType_BOOL>(Op_EQUAL, qinfo_, id_)
+    OpEqual(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : BinaryNode<Rank, Dtype, DType_BOOL>(sgt_, Op_EQUAL, qinfo_, id_)
     {
         register_fcn();
     }
@@ -42,8 +42,8 @@
 class OpGreater : public BinaryNode<Rank, Dtype, DType_BOOL>
 {
 public:
-    OpGreater(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : BinaryNode<Rank, Dtype, DType_BOOL>(Op_GREATER, qinfo_, id_)
+    OpGreater(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : BinaryNode<Rank, Dtype, DType_BOOL>(sgt_, Op_GREATER, qinfo_, id_)
     {
         register_fcn();
     }
@@ -56,8 +56,8 @@
 class OpGreaterEqual : public BinaryNode<Rank, Dtype, DType_BOOL>
 {
 public:
-    OpGreaterEqual(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : BinaryNode<Rank, Dtype, DType_BOOL>(Op_EQUAL, qinfo_, id_)
+    OpGreaterEqual(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : BinaryNode<Rank, Dtype, DType_BOOL>(sgt_, Op_EQUAL, qinfo_, id_)
     {
         register_fcn();
     }
diff --git a/reference_model/src/ops/control_flow.cc b/reference_model/src/ops/control_flow.cc
index 1a6a63a..0945056 100644
--- a/reference_model/src/ops/control_flow.cc
+++ b/reference_model/src/ops/control_flow.cc
@@ -20,8 +20,8 @@
 using namespace Eigen;
 using namespace tosa;
 
-OpControlFlow::OpControlFlow(TosaSerializationHandler* tsh_, Op op_, uint64_t id_)
-    : GraphNode(op_, id_)
+OpControlFlow::OpControlFlow(SubgraphTraverser* sgt_, TosaSerializationHandler* tsh_, Op op_, uint64_t id_)
+    : GraphNode(sgt_, op_, id_)
 {
     tsh = tsh_;
 }
@@ -148,8 +148,8 @@
     return 0;
 }
 
-OpCondIf::OpCondIf(TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_)
-    : OpControlFlow(tsh_, Op_COND_IF, id_)
+OpCondIf::OpCondIf(SubgraphTraverser* sgt_, TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_)
+    : OpControlFlow(sgt_, tsh_, Op_COND_IF, id_)
 {
     INIT_ATTRIBUTE(CondIf);
 }
@@ -221,8 +221,11 @@
     return GraphNode::eval();
 }
 
-OpWhileLoop::OpWhileLoop(TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_)
-    : OpControlFlow(tsh_, Op_WHILE_LOOP, id_)
+OpWhileLoop::OpWhileLoop(SubgraphTraverser* sgt_,
+                         TosaSerializationHandler* tsh_,
+                         TosaAttributeBase* attribute_,
+                         uint64_t id_)
+    : OpControlFlow(sgt_, tsh_, Op_WHILE_LOOP, id_)
 {
     INIT_ATTRIBUTE(WhileLoop);
 }
diff --git a/reference_model/src/ops/control_flow.h b/reference_model/src/ops/control_flow.h
index 14c11bc..879cd6a 100644
--- a/reference_model/src/ops/control_flow.h
+++ b/reference_model/src/ops/control_flow.h
@@ -25,7 +25,7 @@
 class OpControlFlow : public GraphNode
 {
 public:
-    OpControlFlow(TosaSerializationHandler* tsh_, Op op_, uint64_t id_);
+    OpControlFlow(SubgraphTraverser* sgt_, TosaSerializationHandler* tsh_, Op op_, uint64_t id_);
     ~OpControlFlow();
 
     virtual int evalBlock(TosaSerializationBasicBlock* block,
@@ -39,7 +39,7 @@
 class OpCondIf : public OpControlFlow
 {
 public:
-    OpCondIf(TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_);
+    OpCondIf(SubgraphTraverser* sgt_, TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_);
     virtual ~OpCondIf();
 
     virtual int checkTensorAttributes();
@@ -55,7 +55,7 @@
 class OpWhileLoop : public OpControlFlow
 {
 public:
-    OpWhileLoop(TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_);
+    OpWhileLoop(SubgraphTraverser* sgt_, TosaSerializationHandler* tsh_, TosaAttributeBase* attribute_, uint64_t id_);
     virtual ~OpWhileLoop();
 
     virtual int checkTensorAttributes();
diff --git a/reference_model/src/ops/custom.cc b/reference_model/src/ops/custom.cc
index 5c4f29b..5fc36f3 100644
--- a/reference_model/src/ops/custom.cc
+++ b/reference_model/src/ops/custom.cc
@@ -19,8 +19,8 @@
 using namespace Eigen;
 using namespace tosa;
 
-OpCustom::OpCustom(uint64_t id_)
-    : GraphNode(Op_CUSTOM, id_)
+OpCustom::OpCustom(SubgraphTraverser* sgt_, uint64_t id_)
+    : GraphNode(sgt_, Op_CUSTOM, id_)
 {}
 
 OpCustom::~OpCustom()
@@ -33,7 +33,7 @@
 
 int OpCustom::eval()
 {
-    FATAL_ERROR_NODE("not supported yet");
+    FATAL_ERROR("not supported yet");
 
     // Evaluation is trivial for constants
     return GraphNode::eval();
diff --git a/reference_model/src/ops/custom.h b/reference_model/src/ops/custom.h
index b1085a5..d14c809 100644
--- a/reference_model/src/ops/custom.h
+++ b/reference_model/src/ops/custom.h
@@ -26,7 +26,7 @@
 class OpCustom : public GraphNode
 {
 public:
-    OpCustom(uint64_t id_);
+    OpCustom(SubgraphTraverser* sgt_, uint64_t id_);
     virtual ~OpCustom();
 
     virtual int checkTensorAttributes();
diff --git a/reference_model/src/ops/data_layout.cc b/reference_model/src/ops/data_layout.cc
index c66d64e..86326f5 100644
--- a/reference_model/src/ops/data_layout.cc
+++ b/reference_model/src/ops/data_layout.cc
@@ -21,8 +21,11 @@
 using namespace tosa;
 
 template <int Rank, DType Dtype>
-OpConcat<Rank, Dtype>::OpConcat(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_CONCAT, id_)
+OpConcat<Rank, Dtype>::OpConcat(SubgraphTraverser* sgt_,
+                                TosaAttributeBase* attribute_,
+                                TosaQuantInfoBase* qinfo_,
+                                uint64_t id_)
+    : GraphNode(sgt_, Op_CONCAT, id_)
 {
     setRequiredOperands(-1, 1);
     setRequiredRank(1, 6);
@@ -95,8 +98,11 @@
 }
 
 template <int Rank, DType Dtype>
-OpPad<Rank, Dtype>::OpPad(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_PAD, id_)
+OpPad<Rank, Dtype>::OpPad(SubgraphTraverser* sgt_,
+                          TosaAttributeBase* attribute_,
+                          TosaQuantInfoBase* qinfo_,
+                          uint64_t id_)
+    : GraphNode(sgt_, Op_PAD, id_)
 {
     setRequiredOperands(2, 1);
     setRequiredRank(0, 6);
@@ -157,8 +163,11 @@
 }
 
 template <int InRank, int OutRank, DType Dtype>
-OpReshape<InRank, OutRank, Dtype>::OpReshape(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_RESHAPE, id_)
+OpReshape<InRank, OutRank, Dtype>::OpReshape(SubgraphTraverser* sgt_,
+                                             TosaAttributeBase* attribute_,
+                                             TosaQuantInfoBase* qinfo_,
+                                             uint64_t id_)
+    : GraphNode(sgt_, Op_RESHAPE, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
@@ -274,8 +283,11 @@
 }
 
 template <int Rank, DType Dtype>
-OpReverse<Rank, Dtype>::OpReverse(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_REVERSE, id_)
+OpReverse<Rank, Dtype>::OpReverse(SubgraphTraverser* sgt_,
+                                  TosaAttributeBase* attribute_,
+                                  TosaQuantInfoBase* qinfo_,
+                                  uint64_t id_)
+    : GraphNode(sgt_, Op_REVERSE, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(1, 6);
@@ -339,8 +351,11 @@
 }
 
 template <int Rank, DType Dtype>
-OpSlice<Rank, Dtype>::OpSlice(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_SLICE, id_)
+OpSlice<Rank, Dtype>::OpSlice(SubgraphTraverser* sgt_,
+                              TosaAttributeBase* attribute_,
+                              TosaQuantInfoBase* qinfo_,
+                              uint64_t id_)
+    : GraphNode(sgt_, Op_SLICE, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
@@ -407,8 +422,11 @@
 }
 
 template <int Rank, DType Dtype>
-OpTileBase<Rank, Dtype>::OpTileBase(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_TILE, id_)
+OpTileBase<Rank, Dtype>::OpTileBase(SubgraphTraverser* sgt_,
+                                    TosaAttributeBase* attribute_,
+                                    TosaQuantInfoBase* qinfo_,
+                                    uint64_t id_)
+    : GraphNode(sgt_, Op_TILE, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
@@ -466,7 +484,7 @@
 int OpTile<Rank, Dtype>::eval()
 {
     // primary template shouldn't be called
-    FATAL_ERROR_NODE("OpTile rank=%i, dtype=%s: not implemented yet", Rank, EnumNamesDType()[Dtype]);
+    FATAL_ERROR("OpTile rank=%i, dtype=%s: not implemented yet", Rank, EnumNamesDType()[Dtype]);
 }
 
 template <DType Dtype>
@@ -542,8 +560,11 @@
 }
 
 template <int Rank, DType Dtype>
-OpTranspose<Rank, Dtype>::OpTranspose(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_TRANSPOSE, id_)
+OpTranspose<Rank, Dtype>::OpTranspose(SubgraphTraverser* sgt_,
+                                      TosaAttributeBase* attribute_,
+                                      TosaQuantInfoBase* qinfo_,
+                                      uint64_t id_)
+    : GraphNode(sgt_, Op_TRANSPOSE, id_)
 {
     setRequiredOperands(2, 1);
     setRequiredRank(0, 6);
diff --git a/reference_model/src/ops/data_layout.h b/reference_model/src/ops/data_layout.h
index b180b4f..c9c2602 100644
--- a/reference_model/src/ops/data_layout.h
+++ b/reference_model/src/ops/data_layout.h
@@ -27,7 +27,7 @@
 class OpConcat : public GraphNode
 {
 public:
-    OpConcat(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpConcat(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpConcat();
 
     virtual int checkTensorAttributes();
@@ -49,7 +49,7 @@
 class OpPad : public GraphNode
 {
 public:
-    OpPad(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpPad(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpPad();
     virtual int checkTensorAttributes();
     virtual int eval();
@@ -70,7 +70,7 @@
 class OpReshape : public GraphNode
 {
 public:
-    OpReshape(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpReshape(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpReshape();
 
     virtual int checkTensorAttributes();
@@ -94,7 +94,7 @@
 class OpReverse : public GraphNode
 {
 public:
-    OpReverse(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpReverse(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpReverse();
 
     virtual int checkTensorAttributes();
@@ -116,7 +116,7 @@
 class OpSlice : public GraphNode
 {
 public:
-    OpSlice(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpSlice(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpSlice();
 
     virtual int checkTensorAttributes();
@@ -139,7 +139,7 @@
 class OpTileBase : public GraphNode
 {
 public:
-    OpTileBase(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpTileBase(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpTileBase();
 
     virtual int checkTensorAttributes();
@@ -160,8 +160,8 @@
 class OpTile : public OpTileBase<Rank, Dtype>
 {
 public:
-    OpTile(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : OpTileBase<Rank, Dtype>(attribute_, qinfo_, id_)
+    OpTile(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : OpTileBase<Rank, Dtype>(sgt_, attribute_, qinfo_, id_)
     {}
 
 protected:
@@ -174,8 +174,8 @@
     class OpTile<N, Dtype> : public OpTileBase<N, Dtype>                                                               \
     {                                                                                                                  \
     public:                                                                                                            \
-        OpTile(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)                                 \
-            : OpTileBase<N, Dtype>(attribute_, qinfo_, id_)                                                            \
+        OpTile(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)        \
+            : OpTileBase<N, Dtype>(sgt_, attribute_, qinfo_, id_)                                                      \
         {}                                                                                                             \
                                                                                                                        \
     protected:                                                                                                         \
@@ -193,7 +193,7 @@
 class OpTranspose : public GraphNode
 {
 public:
-    OpTranspose(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpTranspose(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpTranspose();
 
     virtual int checkTensorAttributes();
diff --git a/reference_model/src/ops/data_nodes.cc b/reference_model/src/ops/data_nodes.cc
index baae019..ec4bc41 100644
--- a/reference_model/src/ops/data_nodes.cc
+++ b/reference_model/src/ops/data_nodes.cc
@@ -19,8 +19,8 @@
 using namespace Eigen;
 using namespace tosa;
 
-OpConst::OpConst(uint64_t id_)
-    : GraphNode(Op_CONST, id_)
+OpConst::OpConst(SubgraphTraverser* sgt_, uint64_t id_)
+    : GraphNode(sgt_, Op_CONST, id_)
 {
     setRequiredOperands(0, 1);
 }
@@ -43,8 +43,11 @@
 }
 
 template <int Rank, DType Dtype>
-OpIdentity<Rank, Dtype>::OpIdentity(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_IDENTITY, id_)
+OpIdentity<Rank, Dtype>::OpIdentity(SubgraphTraverser* sgt_,
+                                    TosaAttributeBase* attribute_,
+                                    TosaQuantInfoBase* qinfo_,
+                                    uint64_t id_)
+    : GraphNode(sgt_, Op_IDENTITY, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
diff --git a/reference_model/src/ops/data_nodes.h b/reference_model/src/ops/data_nodes.h
index a02d441..407cf0a 100644
--- a/reference_model/src/ops/data_nodes.h
+++ b/reference_model/src/ops/data_nodes.h
@@ -24,7 +24,7 @@
 class OpConst : public GraphNode
 {
 public:
-    OpConst(uint64_t id_);
+    OpConst(SubgraphTraverser* sgt_, uint64_t id_);
     virtual ~OpConst();
 
     virtual int checkTensorAttributes();
@@ -35,7 +35,7 @@
 class OpIdentity : public GraphNode
 {
 public:
-    OpIdentity(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpIdentity(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpIdentity();
 
     virtual int checkTensorAttributes();
diff --git a/reference_model/src/ops/ewise_binary.cc b/reference_model/src/ops/ewise_binary.cc
index 3379ffe..16c4901 100644
--- a/reference_model/src/ops/ewise_binary.cc
+++ b/reference_model/src/ops/ewise_binary.cc
@@ -23,8 +23,11 @@
 using namespace tosa;
 
 template <int Rank, DType InDtype, DType OutDtype>
-BinaryNodeBase<Rank, InDtype, OutDtype>::BinaryNodeBase(const Op& op_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(op_, id_)
+BinaryNodeBase<Rank, InDtype, OutDtype>::BinaryNodeBase(SubgraphTraverser* sgt_,
+                                                        const Op& op_,
+                                                        TosaQuantInfoBase* qinfo_,
+                                                        uint64_t id_)
+    : GraphNode(sgt_, op_, id_)
 {
     setRequiredOperands(2, 1);
     setRequiredRank(0, 6);
@@ -203,7 +206,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a + b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[InDtype]);
     }
 
     return 0;
@@ -226,12 +229,12 @@
             num_bits = 32;
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     this->fcn = [this, round, num_bits](InEigenType a, InEigenType b) -> OutEigenType {
-        ASSERT_MSG_NODE(b >= 0 && b < num_bits, "OpArithmeticRightShift: shift value %d is out of valid range [0, %d]",
-                        (int32_t)b, num_bits);
+        REQUIRE(b >= 0 && b < num_bits, "OpArithmeticRightShift: shift value %d is out of valid range [0, %d]",
+                (int32_t)b, num_bits);
 
         InEigenType acc = a >> b;
 
@@ -257,7 +260,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a & b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -274,7 +277,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a | b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -291,7 +294,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a ^ b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -304,15 +307,15 @@
     {
         case DType_INT32:
             this->fcn = [this](InEigenType a, InEigenType b) -> OutEigenType {
-                ASSERT_MSG_NODE(b != 0, "OpDiv: divisor must be non-zero value");
+                REQUIRE(b != 0, "OpDiv: divisor must be non-zero value");
                 int64_t res_in_64     = static_cast<int64_t>(a) / b;
                 int64_t i32_max_in_64 = static_cast<int64_t>(std::numeric_limits<InEigenType>::max());
-                ASSERT_MSG_NODE(a <= i32_max_in_64, "OpDiv: result not in i32 range");
+                REQUIRE(a <= i32_max_in_64, "OpDiv: result not in i32 range");
                 return static_cast<InEigenType>(res_in_64);
             };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[InDtype]);
     }
 
     return 0;
@@ -327,7 +330,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a && b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -344,7 +347,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a << b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -366,7 +369,7 @@
             num_bits = 32;
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     this->fcn = [num_bits](InEigenType a, InEigenType b) -> OutEigenType {
@@ -386,7 +389,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a || b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -401,7 +404,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a ^ b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -417,7 +420,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a > b ? a : b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -433,7 +436,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a < b ? a : b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -443,8 +446,6 @@
 int OpMul<Rank, InDtype, OutDtype>::register_fcn()
 {
     int32_t shift = attribute->shift();
-    ASSERT_MSG_NODE(InDtype == DType_INT32 || shift == 0, "OpMul: shift needs to be 0 but is %d if input is %s", shift,
-                    EnumNamesDType()[InDtype]);
 
     switch (InDtype)
     {
@@ -460,8 +461,8 @@
                     result        = static_cast<int64_t>(a) * static_cast<int64_t>(b) + round;
                     result        = result >> shift;
 
-                    ASSERT_MSG_NODE(result >= QMin && result <= QMax,
-                                    "OpMul: result %ld exceeds valid range [%ld, %ld]", result, QMin, QMax);
+                    REQUIRE(result >= QMin && result <= QMax, "OpMul: result %ld exceeds valid range [%ld, %ld]",
+                            result, QMin, QMax);
                 }
                 else
                 {
@@ -482,7 +483,7 @@
             };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[InDtype]);
     }
 
     return 0;
@@ -497,7 +498,7 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return powf(a, b); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -513,15 +514,18 @@
             this->fcn = [](InEigenType a, InEigenType b) -> OutEigenType { return a - b; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[InDtype]);
     }
 
     return 0;
 }
 
 template <int Rank, DType InDtype>
-OpTable<Rank, InDtype>::OpTable(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_TABLE, id_)
+OpTable<Rank, InDtype>::OpTable(SubgraphTraverser* sgt_,
+                                TosaAttributeBase* attribute_,
+                                TosaQuantInfoBase* qinfo_,
+                                uint64_t id_)
+    : GraphNode(sgt_, Op_TABLE, id_)
 {
     setRequiredOperands(2, 1);
     setRequiredRank(0, 6);
@@ -607,7 +611,7 @@
             });
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[InDtype]);
     }
 
     return GraphNode::eval();
diff --git a/reference_model/src/ops/ewise_binary.h b/reference_model/src/ops/ewise_binary.h
index a5b1059..86b2101 100644
--- a/reference_model/src/ops/ewise_binary.h
+++ b/reference_model/src/ops/ewise_binary.h
@@ -42,7 +42,7 @@
 class BinaryNodeBase : public GraphNode
 {
 public:
-    BinaryNodeBase(const Op& nodeType, TosaQuantInfoBase* qinfo_, const uint64_t id_);
+    BinaryNodeBase(SubgraphTraverser* sgt_, const Op& nodeType, TosaQuantInfoBase* qinfo_, const uint64_t id_);
     virtual ~BinaryNodeBase();
 
     virtual int checkTensorAttributes() final;
@@ -76,8 +76,8 @@
 class BinaryNode : public BinaryNodeBase<Rank, InDtype, OutDtype>
 {
 public:
-    BinaryNode(const Op& op_, TosaQuantInfoBase* qinfo_, const uint64_t id_)
-        : BinaryNodeBase<Rank, InDtype, OutDtype>(op_, qinfo_, id_)
+    BinaryNode(SubgraphTraverser* sgt_, const Op& op_, TosaQuantInfoBase* qinfo_, const uint64_t id_)
+        : BinaryNodeBase<Rank, InDtype, OutDtype>(sgt_, op_, qinfo_, id_)
     {}
     virtual ~BinaryNode()
     {}
@@ -95,8 +95,8 @@
 class BinaryNode<0, InDtype, OutDtype> : public BinaryNodeBase<0, InDtype, OutDtype>
 {
 public:
-    BinaryNode(const Op& op_, TosaQuantInfoBase* qinfo_, const uint64_t id_)
-        : BinaryNodeBase<0, InDtype, OutDtype>(op_, qinfo_, id_)
+    BinaryNode(SubgraphTraverser* sgt_, const Op& op_, TosaQuantInfoBase* qinfo_, const uint64_t id_)
+        : BinaryNodeBase<0, InDtype, OutDtype>(sgt_, op_, qinfo_, id_)
     {}
     virtual ~BinaryNode()
     {}
@@ -109,8 +109,8 @@
     class Op##Opname : public BinaryNode<Rank, Dtype, Dtype>                                                           \
     {                                                                                                                  \
     public:                                                                                                            \
-        Op##Opname(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)                             \
-            : BinaryNode<Rank, Dtype, Dtype>(Op_##OPNAME, qinfo_, id_)                                                 \
+        Op##Opname(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)    \
+            : BinaryNode<Rank, Dtype, Dtype>(sgt_, Op_##OPNAME, qinfo_, id_)                                           \
         {                                                                                                              \
             register_fcn();                                                                                            \
         }                                                                                                              \
@@ -142,8 +142,11 @@
 class OpArithmeticRightShift : public BinaryNode<Rank, Dtype, Dtype>
 {
 public:
-    OpArithmeticRightShift(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : BinaryNode<Rank, Dtype, Dtype>(Op_ARITHMETIC_RIGHT_SHIFT, qinfo_, id_)
+    OpArithmeticRightShift(SubgraphTraverser* sgt_,
+                           TosaAttributeBase* attribute_,
+                           TosaQuantInfoBase* qinfo_,
+                           uint64_t id_)
+        : BinaryNode<Rank, Dtype, Dtype>(sgt_, Op_ARITHMETIC_RIGHT_SHIFT, qinfo_, id_)
     {
         INIT_ATTRIBUTE(ArithmeticRightShift);
         register_fcn();
@@ -160,8 +163,8 @@
 class OpMul : public BinaryNode<Rank, InDtype, OutDtype>
 {
 public:
-    OpMul(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : BinaryNode<Rank, InDtype, OutDtype>(Op_MUL, qinfo_, id_)
+    OpMul(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : BinaryNode<Rank, InDtype, OutDtype>(sgt_, Op_MUL, qinfo_, id_)
     {
         INIT_ATTRIBUTE(Mul);
         register_fcn();
@@ -180,7 +183,7 @@
 class OpTable : public GraphNode
 {
 public:
-    OpTable(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpTable(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpTable();
 
     virtual int checkTensorAttributes();
diff --git a/reference_model/src/ops/ewise_ternary.cc b/reference_model/src/ops/ewise_ternary.cc
index d4845f9..64c4412 100644
--- a/reference_model/src/ops/ewise_ternary.cc
+++ b/reference_model/src/ops/ewise_ternary.cc
@@ -20,8 +20,11 @@
 using namespace tosa;
 
 template <int Rank, DType Dtype>
-OpSelectBase<Rank, Dtype>::OpSelectBase(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_SELECT, id_)
+OpSelectBase<Rank, Dtype>::OpSelectBase(SubgraphTraverser* sgt_,
+                                        TosaAttributeBase* attribute_,
+                                        TosaQuantInfoBase* qinfo_,
+                                        uint64_t id_)
+    : GraphNode(sgt_, Op_SELECT, id_)
 {
     setRequiredOperands(3, 1);
     setRequiredRank(0, 6);
@@ -62,7 +65,7 @@
 template <int Rank, DType Dtype>
 int OpSelectBase<Rank, Dtype>::eval()
 {
-    FATAL_ERROR_NODE("shouldn't be called");
+    FATAL_ERROR("shouldn't be called");
 }
 
 template <int Rank, DType Dtype>
@@ -78,9 +81,9 @@
         this->bcast_cond[i] = (cond_shape[i] == 1) ? std::max(then_shape[i], else_shape[i]) : 1;
         this->bcast_then[i] = (then_shape[i] == 1) ? std::max(cond_shape[i], else_shape[i]) : 1;
         this->bcast_else[i] = (else_shape[i] == 1) ? std::max(then_shape[i], cond_shape[i]) : 1;
-        ASSERT_MSG_NODE((this->bcast_cond[i] * cond_shape[i]) == out_shape[i], "SELECT broadcast invariant failed");
-        ASSERT_MSG_NODE((this->bcast_then[i] * then_shape[i]) == out_shape[i], "SELECT broadcast invariant failed");
-        ASSERT_MSG_NODE((this->bcast_else[i] * else_shape[i]) == out_shape[i], "SELECT broadcast invariant failed");
+        ERROR_IF((this->bcast_cond[i] * cond_shape[i]) != out_shape[i], "SELECT broadcast invariant failed");
+        ERROR_IF((this->bcast_then[i] * then_shape[i]) != out_shape[i], "SELECT broadcast invariant failed");
+        ERROR_IF((this->bcast_else[i] * else_shape[i]) != out_shape[i], "SELECT broadcast invariant failed");
     }
 
     return 0;
diff --git a/reference_model/src/ops/ewise_ternary.h b/reference_model/src/ops/ewise_ternary.h
index b354247..b80fb23 100644
--- a/reference_model/src/ops/ewise_ternary.h
+++ b/reference_model/src/ops/ewise_ternary.h
@@ -33,7 +33,7 @@
 class OpSelectBase : public GraphNode
 {
 public:
-    OpSelectBase(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpSelectBase(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpSelectBase();
 
     virtual int checkTensorAttributes();
@@ -59,8 +59,8 @@
 class OpSelect : public OpSelectBase<Rank, Dtype>
 {
 public:
-    OpSelect(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : OpSelectBase<Rank, Dtype>(attribute_, qinfo_, id_)
+    OpSelect(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : OpSelectBase<Rank, Dtype>(sgt_, attribute_, qinfo_, id_)
     {}
     virtual int eval();
     int broadcast();
@@ -73,8 +73,8 @@
 class OpSelect<0, Dtype> : public OpSelectBase<0, Dtype>
 {
 public:
-    OpSelect(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : OpSelectBase<0, Dtype>(attribute_, qinfo_, id_)
+    OpSelect(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : OpSelectBase<0, Dtype>(sgt_, attribute_, qinfo_, id_)
     {}
     virtual int eval();
 };
diff --git a/reference_model/src/ops/ewise_unary.cc b/reference_model/src/ops/ewise_unary.cc
index 95a1102..041bbdb 100644
--- a/reference_model/src/ops/ewise_unary.cc
+++ b/reference_model/src/ops/ewise_unary.cc
@@ -23,8 +23,8 @@
 using namespace tosa;
 
 template <int Rank, DType Dtype>
-UnaryNode<Rank, Dtype>::UnaryNode(const Op& op_, uint64_t id_)
-    : GraphNode(op_, id_)
+UnaryNode<Rank, Dtype>::UnaryNode(SubgraphTraverser* sgt_, const Op& op_, uint64_t id_)
+    : GraphNode(sgt_, op_, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
@@ -80,7 +80,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return a > (InEigenType)0 ? a : (-a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -97,7 +97,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return ~a; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -112,7 +112,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return ceilf(a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -128,7 +128,7 @@
             num_bits = 32;
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     this->fcn = [num_bits](int32_t a) -> int32_t {
@@ -159,7 +159,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return expf(a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -174,7 +174,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return floorf(a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -189,7 +189,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return logf(a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -204,7 +204,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return !a; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -213,6 +213,12 @@
 template <int Rank, DType Dtype>
 int OpNegate<Rank, Dtype>::register_fcn()
 {
+    if (Dtype != DType_INT8 && this->qinfo)
+    {
+        ERROR_IF(this->qinfo->input_zp() != 0, "OpNegate: zeropoint only for int8_t");
+        ERROR_IF(this->qinfo->output_zp() != 0, "OpNegate: zeropoint only for int8_t");
+    }
+
     switch (Dtype)
     {
         case DType_FLOAT:
@@ -229,7 +235,6 @@
             };
             break;
         case DType_INT8:
-            ASSERT(this->qinfo);
             this->fcn = [this](InEigenType a) -> OutEigenType {
                 InEigenType result = -(a - this->qinfo->input_zp()) + this->qinfo->output_zp();
                 result = std::min(std::max(result, static_cast<InEigenType>(QMin)), static_cast<InEigenType>(QMax));
@@ -237,7 +242,7 @@
             };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -252,7 +257,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return 1.0 / a; };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
@@ -267,7 +272,7 @@
             this->fcn = [](InEigenType a) -> OutEigenType { return 1.0 / sqrtf(a); };
             break;
         default:
-            FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[Dtype]);
+            ERROR_IF(true, "unsupported DType %s", EnumNamesDType()[Dtype]);
     }
 
     return 0;
diff --git a/reference_model/src/ops/ewise_unary.h b/reference_model/src/ops/ewise_unary.h
index 0db3cfb..374c8e4 100644
--- a/reference_model/src/ops/ewise_unary.h
+++ b/reference_model/src/ops/ewise_unary.h
@@ -26,7 +26,7 @@
 class UnaryNode : public GraphNode
 {
 public:
-    UnaryNode(const Op& nodeType, const uint64_t id_);
+    UnaryNode(SubgraphTraverser* sgt_, const Op& nodeType, const uint64_t id_);
     virtual ~UnaryNode();
 
     virtual int checkTensorAttributes() final;
@@ -49,8 +49,8 @@
     class Op##Opname : public UnaryNode<Rank, Dtype>                                                                   \
     {                                                                                                                  \
     public:                                                                                                            \
-        Op##Opname(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)                             \
-            : UnaryNode<Rank, Dtype>(Op_##OPNAME, id_)                                                                 \
+        Op##Opname(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)    \
+            : UnaryNode<Rank, Dtype>(sgt_, Op_##OPNAME, id_)                                                           \
         {                                                                                                              \
             register_fcn();                                                                                            \
         }                                                                                                              \
@@ -66,8 +66,8 @@
     class Op##Opname : public UnaryNode<Rank, Dtype>                                                                   \
     {                                                                                                                  \
     public:                                                                                                            \
-        Op##Opname(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)                             \
-            : UnaryNode<Rank, Dtype>(Op_##OPNAME, id_)                                                                 \
+        Op##Opname(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)    \
+            : UnaryNode<Rank, Dtype>(sgt_, Op_##OPNAME, id_)                                                           \
         {                                                                                                              \
             INIT_QINFO(Unary);                                                                                         \
             register_fcn();                                                                                            \
diff --git a/reference_model/src/ops/image.cc b/reference_model/src/ops/image.cc
index 829a6e0..f4decae 100644
--- a/reference_model/src/ops/image.cc
+++ b/reference_model/src/ops/image.cc
@@ -22,8 +22,11 @@
 using namespace tosa;
 
 template <DType InDtype, DType OutDtype>
-OpResize<InDtype, OutDtype>::OpResize(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_RESIZE, id_)
+OpResize<InDtype, OutDtype>::OpResize(SubgraphTraverser* sgt_,
+                                      TosaAttributeBase* attribute_,
+                                      TosaQuantInfoBase* qinfo_,
+                                      uint64_t id_)
+    : GraphNode(sgt_, Op_RESIZE, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(4, 4);
@@ -102,10 +105,13 @@
     int out_width    = out->getShape()[2];
     int out_channels = out->getShape()[3];
 
-    ASSERT_MSG_NODE(shift > 0 && shift <= 11, "OpResize: attribute shift should be within [1, 11]");
-    ASSERT_MSG_NODE(stride[0] > 0 && stride[1] > 0, "OpResize: invalid attribute stride");
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpResize: output tensor batch mismatch");
-    ASSERT_MSG_NODE(in_channels == out_channels, "OpResize: output tensor channel mismatch");
+    ERROR_IF(shift < 1 || shift > 11, "OpResize: attribute shift should be within [1, 11]");
+    ERROR_IF(stride[0] <= 0 || stride[0] >= (16 << shift), "OpResize: invalid attribute stride_x");
+    ERROR_IF(stride[1] <= 0 || stride[1] >= (16 << shift), "OpResize: invalid attribute stride_y");
+    ERROR_IF(offset[0] <= (-16 << shift) || offset[0] >= (16 << shift), "OpResize: invalid attribute offset_x");
+    ERROR_IF(offset[1] <= (-16 << shift) || offset[1] >= (16 << shift), "OpResize: invalid attribute offset_y");
+    ERROR_IF(in_batch != out_batch, "OpResize: output tensor batch mismatch");
+    ERROR_IF(in_channels != out_channels, "OpResize: output tensor channel mismatch");
 
     for (int b = 0; b < out_batch; b++)
         for (int c = 0; c < out_channels; c++)
@@ -125,8 +131,8 @@
                     int32_t ix0 = MAX(ix, 0);
                     int32_t ix1 = MIN(ix + 1, in_width - 1);
 
-                    ASSERT_MSG(iy0 <= iy1 && ix0 <= ix1, "OpResize: invalid index (iy0, iy1, ix0, ix1)=(%d,%d,%d,%d)",
-                               iy0, iy1, ix0, ix1);
+                    REQUIRE(iy0 <= iy1 && ix0 <= ix1, "OpResize: invalid index (iy0, iy1, ix0, ix1)=(%d,%d,%d,%d)", iy0,
+                            iy1, ix0, ix1);
 
                     OutEigenType acc;
                     if (mode == ResizeMode_BILINEAR)
@@ -167,10 +173,10 @@
     int out_width    = out->getShape()[2];
     int out_channels = out->getShape()[3];
 
-    ASSERT_MSG_NODE(shift == 0, "OpResize: float mode must have 0 shift");
-    ASSERT_MSG_NODE(stride_fp[0] > 0.0f && stride_fp[1] > 0.0f, "OpResize: invalid attribute stride");
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpResize: output tensor batch mismatch");
-    ASSERT_MSG_NODE(in_channels == out_channels, "OpResize: output tensor channel mismatch");
+    ERROR_IF(shift != 0, "OpResize: float mode must have 0 shift");
+    ERROR_IF(stride_fp[0] <= 0.0f || stride_fp[1] <= 0.0f, "OpResize: invalid attribute stride");
+    ERROR_IF(in_batch != out_batch, "OpResize: output tensor batch mismatch");
+    ERROR_IF(in_channels != out_channels, "OpResize: output tensor channel mismatch");
 
     for (int b = 0; b < out_batch; b++)
         for (int c = 0; c < out_channels; c++)
@@ -190,8 +196,8 @@
                     int32_t ix0 = MAX(ix, 0);
                     int32_t ix1 = MIN(ix + 1, in_width - 1);
 
-                    ASSERT_MSG(iy0 <= iy1 && ix0 <= ix1, "OpResize: invalid index (iy0, iy1, ix0, ix1)=(%d,%d,%d,%d)",
-                               iy0, iy1, ix0, ix1);
+                    REQUIRE(iy0 <= iy1 && ix0 <= ix1, "OpResize: invalid index (iy0, iy1, ix0, ix1)=(%d,%d,%d,%d)", iy0,
+                            iy1, ix0, ix1);
 
                     OutEigenType acc;
                     if (mode == ResizeMode_BILINEAR)
diff --git a/reference_model/src/ops/image.h b/reference_model/src/ops/image.h
index 5dd14c8..095dc7d 100644
--- a/reference_model/src/ops/image.h
+++ b/reference_model/src/ops/image.h
@@ -27,7 +27,7 @@
 class OpResize : public GraphNode
 {
 public:
-    OpResize(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpResize(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpResize();
     virtual int checkTensorAttributes() final;
     virtual int eval();
diff --git a/reference_model/src/ops/op_factory.cc b/reference_model/src/ops/op_factory.cc
index 726ab7c..2d9e428 100644
--- a/reference_model/src/ops/op_factory.cc
+++ b/reference_model/src/ops/op_factory.cc
@@ -32,7 +32,8 @@
 using namespace TosaReference;
 using namespace tosa;
 
-GraphNode* OpFactory::newOp(TosaSerializationHandler* tsh,
+GraphNode* OpFactory::newOp(SubgraphTraverser* sgt,
+                            TosaSerializationHandler* tsh,
                             Op opType,
                             TosaAttributeBase* attribute,
                             TosaQuantInfoBase* qinfo,
@@ -349,7 +350,7 @@
 
         // data_nodes
         case Op_CONST:
-            return new OpConst(id);
+            return new OpConst(sgt, id);
         case Op_IDENTITY:
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, FLOAT);
             DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT32);
@@ -398,13 +399,13 @@
 
         // custom
         case Op_CUSTOM:
-            return new OpCustom(id);
+            return new OpCustom(sgt, id);
 
         // control_flow
         case Op_COND_IF:
-            return new OpCondIf(tsh, attribute, id);
+            return new OpCondIf(sgt, tsh, attribute, id);
         case Op_WHILE_LOOP:
-            return new OpWhileLoop(tsh, attribute, id);
+            return new OpWhileLoop(sgt, tsh, attribute, id);
 
         // Ops not recognized
         default:
diff --git a/reference_model/src/ops/op_factory.h b/reference_model/src/ops/op_factory.h
index 0c116b6..eaa359c 100644
--- a/reference_model/src/ops/op_factory.h
+++ b/reference_model/src/ops/op_factory.h
@@ -24,55 +24,55 @@
 
 #define DEF_FACTORY_ONE_RANK_ONE_TYPE(OP, RANK, DTYPE)                                                                 \
     case RANK:                                                                                                         \
-        return new OP<RANK, DType_##DTYPE>(attribute, qinfo, id);
+        return new OP<RANK, DType_##DTYPE>(sgt, attribute, qinfo, id);
 
 #define DEF_FACTORY_ONE_RANK_TWO_TYPE(OP, RANK, DTYPE1, DTYPE2)                                                        \
     case RANK:                                                                                                         \
-        return new OP<RANK, DType_##DTYPE1, DType_##DTYPE2>(attribute, qinfo, id);
+        return new OP<RANK, DType_##DTYPE1, DType_##DTYPE2>(sgt, attribute, qinfo, id);
 
 #define DEF_FACTORY_TWO_RANK_ONE_TYPE(OP, RANK1, RANK2, DTYPE)                                                         \
     case RANK2:                                                                                                        \
-        return new OP<RANK1, RANK2, DType_##DTYPE>(attribute, qinfo, id);
+        return new OP<RANK1, RANK2, DType_##DTYPE>(sgt, attribute, qinfo, id);
 
 #define DEF_FACTORY_TWO_RANK_TWO_TYPE(OP, RANK1, RANK2, DTYPE1, DTYPE2)                                                \
     case RANK2:                                                                                                        \
-        return new OP<RANK1, RANK2, DType_##DTYPE1, DType_##DTYPE2>(attribute, qinfo, id);
+        return new OP<RANK1, RANK2, DType_##DTYPE1, DType_##DTYPE2>(sgt, attribute, qinfo, id);
 
 #define DEF_FACTORY_ONE_RANK_0_6(OP)                                                                                   \
     switch (inputRank)                                                                                                 \
     {                                                                                                                  \
         case 0:                                                                                                        \
-            return new OP<0>(attribute, qinfo, id);                                                                    \
+            return new OP<0>(sgt, attribute, qinfo, id);                                                               \
         case 1:                                                                                                        \
-            return new OP<1>(attribute, qinfo, id);                                                                    \
+            return new OP<1>(sgt, attribute, qinfo, id);                                                               \
         case 2:                                                                                                        \
-            return new OP<2>(attribute, qinfo, id);                                                                    \
+            return new OP<2>(sgt, attribute, qinfo, id);                                                               \
         case 3:                                                                                                        \
-            return new OP<3>(attribute, qinfo, id);                                                                    \
+            return new OP<3>(sgt, attribute, qinfo, id);                                                               \
         case 4:                                                                                                        \
-            return new OP<4>(attribute, qinfo, id);                                                                    \
+            return new OP<4>(sgt, attribute, qinfo, id);                                                               \
         case 5:                                                                                                        \
-            return new OP<5>(attribute, qinfo, id);                                                                    \
+            return new OP<5>(sgt, attribute, qinfo, id);                                                               \
         case 6:                                                                                                        \
-            return new OP<6>(attribute, qinfo, id);                                                                    \
+            return new OP<6>(sgt, attribute, qinfo, id);                                                               \
     }
 
 #define DEF_FACTORY_ONE_TYPE(OP, DTYPE)                                                                                \
     if (inputDType == DType_##DTYPE)                                                                                   \
     {                                                                                                                  \
-        return new OP<DType_##DTYPE>(attribute, qinfo, id);                                                            \
+        return new OP<DType_##DTYPE>(sgt, attribute, qinfo, id);                                                       \
     }
 
 #define DEF_FACTORY_TWO_TYPE(OP, DTYPE1, DTYPE2)                                                                       \
     if (inputDType == DType_##DTYPE1 && weightDType == DType_##DTYPE2)                                                 \
     {                                                                                                                  \
-        return new OP<DType_##DTYPE1, DType_##DTYPE2>(attribute, qinfo, id);                                           \
+        return new OP<DType_##DTYPE1, DType_##DTYPE2>(sgt, attribute, qinfo, id);                                      \
     }
 
 #define DEF_FACTORY_TWO_TYPE_RESIZE(OP, DTYPE1, DTYPE2)                                                                \
     if (inputDType == DType_##DTYPE1 && outputDType == DType_##DTYPE2)                                                 \
     {                                                                                                                  \
-        return new OP<DType_##DTYPE1, DType_##DTYPE2>(attribute, qinfo, id);                                           \
+        return new OP<DType_##DTYPE1, DType_##DTYPE2>(sgt, attribute, qinfo, id);                                      \
     }
 
 #define DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OP, DTYPE)                                                               \
@@ -221,10 +221,14 @@
 namespace TosaReference
 {
 
+class SubgraphTraverser;
+class GraphNode;
+
 class OpFactory
 {
 public:
-    static GraphNode* newOp(tosa::TosaSerializationHandler* tsh,
+    static GraphNode* newOp(SubgraphTraverser* sgt,
+                            tosa::TosaSerializationHandler* tsh,
                             tosa::Op opType,
                             tosa::TosaAttributeBase* attribute,
                             tosa::TosaQuantInfoBase* qinfo,
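
For reference, the factory macros above are used inside OpFactory::newOp() to dispatch on dtype/rank and forward the new SubgraphTraverser pointer into every op constructor. A minimal, self-contained sketch of that dispatch pattern (simplified stand-in types and names, not the reference model's real signatures):

    // Stand-alone sketch of the macro-based factory dispatch (illustrative names only).
    #include <cstdint>

    class SubgraphTraverser;                       // forward declaration, as in the header above
    struct Node { virtual ~Node() = default; };

    template <int DTYPE>
    struct MyOp : Node
    {
        MyOp(SubgraphTraverser* sgt, uint64_t id) { (void)sgt; (void)id; }
    };

    // Same shape as DEF_FACTORY_ONE_TYPE: match the dtype, forward sgt first.
    #define DEF_FACTORY_ONE_TYPE_SKETCH(OP, DTYPE)                              \
        if (inputDType == (DTYPE))                                              \
        {                                                                       \
            return new OP<DTYPE>(sgt, id);                                      \
        }

    Node* newOpSketch(SubgraphTraverser* sgt, int inputDType, uint64_t id)
    {
        DEF_FACTORY_ONE_TYPE_SKETCH(MyOp, 0)       // e.g. DType_FLOAT
        DEF_FACTORY_ONE_TYPE_SKETCH(MyOp, 1)       // e.g. DType_INT8
        return nullptr;                            // unsupported combination
    }
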
diff --git a/reference_model/src/ops/reduction.cc b/reference_model/src/ops/reduction.cc
index 97a7aa7..107c7a8 100644
--- a/reference_model/src/ops/reduction.cc
+++ b/reference_model/src/ops/reduction.cc
@@ -21,8 +21,8 @@
 using namespace tosa;
 
 template <int Rank, DType Dtype>
-ReduceNode<Rank, Dtype>::ReduceNode(const Op& op_, TosaAttributeBase* attribute_, uint64_t id_)
-    : GraphNode(op_, id_)
+ReduceNode<Rank, Dtype>::ReduceNode(SubgraphTraverser* sgt_, const Op& op_, TosaAttributeBase* attribute_, uint64_t id_)
+    : GraphNode(sgt_, op_, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 4);
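
The constructor change above follows the same pattern used throughout this patch: the op forwards the traverser to its GraphNode base so that later node-level checks can be attributed to the owning subgraph. A rough sketch of that base-class plumbing, with the member name parent_sgt assumed purely for illustration:

    // Illustrative only; the real GraphNode/SubgraphTraverser carry more state.
    #include <cstdint>

    class SubgraphTraverser;                 // owns nodes, collects check results

    class GraphNodeSketch
    {
    public:
        GraphNodeSketch(SubgraphTraverser* sgt_, int op_, uint64_t id_)
            : parent_sgt(sgt_), op(op_), id(id_)
        {}
        virtual ~GraphNodeSketch() = default;

    protected:
        SubgraphTraverser* parent_sgt;       // assumed member name
        int op;
        uint64_t id;
    };

    class ReduceNodeSketch : public GraphNodeSketch
    {
    public:
        ReduceNodeSketch(SubgraphTraverser* sgt_, int op_, uint64_t id_)
            : GraphNodeSketch(sgt_, op_, id_) // forward, do not store twice
        {}
    };
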
diff --git a/reference_model/src/ops/reduction.h b/reference_model/src/ops/reduction.h
index cf75812..f4e29b9 100644
--- a/reference_model/src/ops/reduction.h
+++ b/reference_model/src/ops/reduction.h
@@ -27,7 +27,7 @@
 class ReduceNode : public GraphNode
 {
 public:
-    ReduceNode(const Op& nodeType, TosaAttributeBase* attribute_, const uint64_t id_);
+    ReduceNode(SubgraphTraverser* sgt_, const Op& nodeType, TosaAttributeBase* attribute_, const uint64_t id_);
     virtual ~ReduceNode();
     virtual int checkTensorAttributes();
     virtual int eval() = 0;
@@ -48,8 +48,8 @@
 class OpReduceAll : public ReduceNode<Rank, Dtype>
 {
 public:
-    OpReduceAll(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : ReduceNode<Rank, Dtype>(Op_REDUCE_ALL, attribute_, id_)
+    OpReduceAll(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : ReduceNode<Rank, Dtype>(sgt_, Op_REDUCE_ALL, attribute_, id_)
     {}
     virtual int eval();
 };
@@ -58,8 +58,8 @@
 class OpReduceAny : public ReduceNode<Rank, Dtype>
 {
 public:
-    OpReduceAny(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : ReduceNode<Rank, Dtype>(Op_REDUCE_ALL, attribute_, id_)
+    OpReduceAny(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : ReduceNode<Rank, Dtype>(sgt_, Op_REDUCE_ANY, attribute_, id_)
     {}
     virtual int eval();
 };
@@ -68,8 +68,8 @@
 class OpReduceMax : public ReduceNode<Rank, Dtype>
 {
 public:
-    OpReduceMax(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : ReduceNode<Rank, Dtype>(Op_REDUCE_MAX, attribute_, id_)
+    OpReduceMax(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : ReduceNode<Rank, Dtype>(sgt_, Op_REDUCE_MAX, attribute_, id_)
     {}
     virtual int eval();
 };
@@ -78,8 +78,8 @@
 class OpReduceMin : public ReduceNode<Rank, Dtype>
 {
 public:
-    OpReduceMin(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : ReduceNode<Rank, Dtype>(Op_REDUCE_MIN, attribute_, id_)
+    OpReduceMin(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : ReduceNode<Rank, Dtype>(sgt_, Op_REDUCE_MIN, attribute_, id_)
     {}
     virtual int eval();
 };
@@ -88,8 +88,8 @@
 class OpReduceProduct : public ReduceNode<Rank, Dtype>
 {
 public:
-    OpReduceProduct(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : ReduceNode<Rank, Dtype>(Op_REDUCE_PRODUCT, attribute_, id_)
+    OpReduceProduct(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : ReduceNode<Rank, Dtype>(sgt_, Op_REDUCE_PRODUCT, attribute_, id_)
     {}
     virtual int eval();
 };
@@ -98,8 +98,8 @@
 class OpReduceSum : public ReduceNode<Rank, Dtype>
 {
 public:
-    OpReduceSum(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-        : ReduceNode<Rank, Dtype>(Op_REDUCE_SUM, attribute_, id_)
+    OpReduceSum(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
+        : ReduceNode<Rank, Dtype>(sgt_, Op_REDUCE_SUM, attribute_, id_)
     {}
     virtual int eval();
 };
diff --git a/reference_model/src/ops/scatter_gather.cc b/reference_model/src/ops/scatter_gather.cc
index 478b776..02ec54f 100644
--- a/reference_model/src/ops/scatter_gather.cc
+++ b/reference_model/src/ops/scatter_gather.cc
@@ -21,8 +21,11 @@
 using namespace tosa;
 
 template <DType Dtype>
-OpGather<Dtype>::OpGather(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_GATHER, id_)
+OpGather<Dtype>::OpGather(SubgraphTraverser* sgt_,
+                          TosaAttributeBase* attribute_,
+                          TosaQuantInfoBase* qinfo_,
+                          uint64_t id_)
+    : GraphNode(sgt_, Op_GATHER, id_)
 {
     setRequiredOperands(2, 1);
 }
@@ -102,7 +105,7 @@
         for (int32_t w = 0; w < W; w++)
         {
             int32_t k = this->indices->getTensor()(n, w);
-            ASSERT_MSG_NODE(k >= 0 && k < K, "OpGather: index(%d, %d)=%d exceed valid range [0, %d]", n, w, k, K);
+            REQUIRE(k >= 0 && k < K, "OpGather: index(%d, %d)=%d exceeds valid range [0, %d)", n, w, k, K);
             for (int32_t c = 0; c < C; c++)
             {
                 EigenType value                    = this->values->getTensor()(n, k, c);
@@ -115,8 +118,11 @@
 }
 
 template <DType Dtype>
-OpScatter<Dtype>::OpScatter(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_SCATTER, id_)
+OpScatter<Dtype>::OpScatter(SubgraphTraverser* sgt_,
+                            TosaAttributeBase* attribute_,
+                            TosaQuantInfoBase* qinfo_,
+                            uint64_t id_)
+    : GraphNode(sgt_, Op_SCATTER, id_)
 {
     setRequiredOperands(3, 1);
 }
@@ -206,7 +212,7 @@
         for (int w = 0; w < W; w++)
         {
             int32_t k = this->indices->getTensor()(n, w);
-            ASSERT_MSG_NODE(k >= 0 && k < K, "OpScatter: index(%d, %d)=%d exceed valid range [0, %d]", n, w, k, K);
+            REQUIRE(k >= 0 && k < K, "OpScatter: index(%d, %d)=%d exceeds valid range [0, %d)", n, w, k, K);
             for (int c = 0; c < C; c++)
             {
                 EigenType value                        = this->input->getTensor()(n, w, c);
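
In plain array terms, the gather loop above computes output[n][w][c] = values[n][indices[n][w]][c], with REQUIRE() guarding the K dimension. A minimal sketch with flattened row-major buffers (types and bounds simplified; not the Eigen-based implementation):

    // Plain-array sketch of the GATHER inner loops (illustrative, not the Eigen path).
    #include <cstdint>
    #include <vector>

    // values: [N, K, C], indices: [N, W], output: [N, W, C], all row-major flattened.
    bool gather_sketch(const std::vector<int32_t>& values,
                       const std::vector<int32_t>& indices,
                       std::vector<int32_t>& output,
                       int N, int K, int W, int C)
    {
        output.assign(static_cast<size_t>(N) * W * C, 0);
        for (int n = 0; n < N; n++)
        {
            for (int w = 0; w < W; w++)
            {
                int32_t k = indices[n * W + w];
                if (k < 0 || k >= K)
                    return false;            // stands in for the REQUIRE() bound check
                for (int c = 0; c < C; c++)
                    output[(n * W + w) * C + c] = values[(n * K + k) * C + c];
            }
        }
        return true;
    }
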
diff --git a/reference_model/src/ops/scatter_gather.h b/reference_model/src/ops/scatter_gather.h
index 17ea723..66b584a 100644
--- a/reference_model/src/ops/scatter_gather.h
+++ b/reference_model/src/ops/scatter_gather.h
@@ -27,7 +27,7 @@
 class OpGather : public GraphNode
 {
 public:
-    OpGather(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpGather(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpGather();
 
     virtual int checkTensorAttributes();
@@ -49,7 +49,7 @@
 class OpScatter : public GraphNode
 {
 public:
-    OpScatter(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpScatter(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpScatter();
 
     virtual int checkTensorAttributes();
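
The REQUIRE()/ERROR_IF() calls introduced throughout this patch bail out of the calling function instead of aborting the process. Their definitions live elsewhere; the sketch below only illustrates the assumed shape of such macros (the return value, the message plumbing and the return-code mapping are guesses, not taken from this patch):

    // Hedged sketch of node-level check macros; all details here are assumed.
    #include <cstdio>

    enum class ReturnCode { VALID, UNPREDICTABLE, ERROR };

    // REQUIRE(cond, ...): if cond does not hold, log the message and make the
    // enclosing function return a non-zero status so the traverser can record
    // the failure (e.g. as UNPREDICTABLE) instead of killing the process.
    #define REQUIRE_SKETCH(COND, ...)                                           \
        do                                                                      \
        {                                                                       \
            if (!(COND))                                                        \
            {                                                                   \
                fprintf(stderr, __VA_ARGS__);                                   \
                fprintf(stderr, "\n");                                          \
                return 1;                                                       \
            }                                                                   \
        } while (0)

    // ERROR_IF(cond, ...): inverse sense, used for graph/shape errors
    // (e.g. mapped to ReturnCode::ERROR by the caller).
    #define ERROR_IF_SKETCH(COND, ...) REQUIRE_SKETCH(!(COND), __VA_ARGS__)
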
diff --git a/reference_model/src/ops/tensor_ops.cc b/reference_model/src/ops/tensor_ops.cc
index 0007553..045c0a5 100644
--- a/reference_model/src/ops/tensor_ops.cc
+++ b/reference_model/src/ops/tensor_ops.cc
@@ -22,8 +22,11 @@
 using namespace tosa;
 
 template <int Rank, DType Dtype>
-OpArgMax<Rank, Dtype>::OpArgMax(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_ARGMAX, id_)
+OpArgMax<Rank, Dtype>::OpArgMax(SubgraphTraverser* sgt_,
+                                TosaAttributeBase* attribute_,
+                                TosaQuantInfoBase* qinfo_,
+                                uint64_t id_)
+    : GraphNode(sgt_, Op_ARGMAX, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
@@ -66,8 +69,11 @@
 }
 
 template <DType Dtype>
-OpAvgPool2d<Dtype>::OpAvgPool2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_AVG_POOL2D, id_)
+OpAvgPool2d<Dtype>::OpAvgPool2d(SubgraphTraverser* sgt_,
+                                TosaAttributeBase* attribute_,
+                                TosaQuantInfoBase* qinfo_,
+                                uint64_t id_)
+    : GraphNode(sgt_, Op_AVG_POOL2D, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(4);
@@ -142,9 +148,6 @@
     int32_t left_index  = pad_left / stride;
     int32_t right_index = pad_right / stride;
 
-    // not handle ultra small activation yet
-    ASSERT_MSG_NODE((out_size - 1 - right_index) >= left_index, "AvgPool2d: Small activations not supported yet");
-
     // minus the number of pad bit this index cover
     while (left_index >= 0)
     {
@@ -176,7 +179,8 @@
     int out_width    = this->out->getShape()[2];
     int out_channels = this->out->getShape()[3];
 
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpAvgPool2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(in_batch != out_batch, "OpAvgPool2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(in_channels != out_channels, "OpAvgPool2d: tensor channel mismatch %d != %d", in_channels, out_channels);
 
     int padding_top    = this->attribute->padding()[0];
     int padding_bottom = this->attribute->padding()[1];
@@ -260,12 +264,19 @@
 
     if (Dtype != DType_FLOAT)
     {
-        this->out->getTensor() = sum.binaryExpr(div_map, [](AccEigenType value, int32_t div) -> OutEigenType {
-            int32_t multiplier, shift;
-            TosaReference::QuantUtil::reciprocal_scale(div, multiplier, shift);
+        try
+        {
+            this->out->getTensor() = sum.binaryExpr(div_map, [](AccEigenType value, int32_t div) -> OutEigenType {
+                int32_t multiplier, shift;
+                TosaReference::QuantUtil::reciprocal_scale(div, multiplier, shift);
 
-            return (OutEigenType)TosaReference::QuantUtil::apply_scale_32(value, multiplier, shift, false);
-        });
+                return (OutEigenType)TosaReference::QuantUtil::apply_scale_32(value, multiplier, shift, false);
+            });
+        }
+        catch (std::string desc)
+        {
+            REQUIRE(false, "OpAvgPool2d apply_scale_32() fails: %s.", desc.c_str());
+        }
         this->out->getTensor() = this->out->getTensor() + (OutEigenType)(this->qinfo->output_zp());
         this->out->getTensor() = this->out->getTensor().cwiseMax((OutEigenType)QMin);
         this->out->getTensor() = this->out->getTensor().cwiseMin((OutEigenType)QMax);
@@ -279,8 +290,11 @@
 }
 
 template <DType InDtype, DType WeightDtype>
-OpConv2d<InDtype, WeightDtype>::OpConv2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_CONV2D, id_)
+OpConv2d<InDtype, WeightDtype>::OpConv2d(SubgraphTraverser* sgt_,
+                                         TosaAttributeBase* attribute_,
+                                         TosaQuantInfoBase* qinfo_,
+                                         uint64_t id_)
+    : GraphNode(sgt_, Op_CONV2D, id_)
 {
     setRequiredOperands(3, 1);
     setRequiredRank(4);
@@ -361,13 +375,12 @@
     int out_width    = this->output->getShape()[2];
     int out_channels = this->output->getShape()[3];
 
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
-    ASSERT_MSG_NODE(f_in_channels == in_channels, "OpConv2d: tensor input channel mismatch %d != %d", f_in_channels,
-                    in_channels);
-    ASSERT_MSG_NODE(f_out_channels == out_channels, "OpConv2d: tensor output channel mismatch %d != %d", f_out_channels,
-                    out_channels);
-    ASSERT_MSG_NODE(b_out_channels == out_channels, "OpConv2d: tensor output channel mismatch %d != %d", b_out_channels,
-                    out_channels);
+    ERROR_IF(in_batch != out_batch, "OpConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(f_in_channels != in_channels, "OpConv2d: tensor input channel mismatch %d != %d", f_in_channels,
+             in_channels);
+    ERROR_IF(f_out_channels != out_channels, "OpConv2d: tensor output channel mismatch %d != %d", f_out_channels,
+             out_channels);
+    ERROR_IF(b_out_channels != out_channels, "OpConv2d: bias channel mismatch %d != %d", b_out_channels, out_channels);
 
     int padding_top    = this->attribute->padding()[0];
     int padding_bottom = this->attribute->padding()[1];
@@ -469,10 +482,11 @@
 }
 
 template <DType InDtype, DType WeightDtype>
-OpDepthwiseConv2d<InDtype, WeightDtype>::OpDepthwiseConv2d(TosaAttributeBase* attribute_,
+OpDepthwiseConv2d<InDtype, WeightDtype>::OpDepthwiseConv2d(SubgraphTraverser* sgt_,
+                                                           TosaAttributeBase* attribute_,
                                                            TosaQuantInfoBase* qinfo_,
                                                            uint64_t id_)
-    : GraphNode(Op_DEPTHWISE_CONV2D, id_)
+    : GraphNode(sgt_, Op_DEPTHWISE_CONV2D, id_)
 {
     setRequiredOperands(3, 1);
     setRequiredRank(4);
@@ -553,14 +567,13 @@
     int out_width    = this->output->getShape()[2];
     int out_channels = this->output->getShape()[3];
 
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpDepthwiseConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
-    ASSERT_MSG_NODE(f_in_channels == in_channels, "OpDepthwiseConv2d: tensor input channel mismatch %d != %d",
-                    f_in_channels, in_channels);
-    ASSERT_MSG_NODE(in_channels * f_multiplier == out_channels,
-                    "OpDepthwiseConv2d: tensor output channel mismatch %d != %d", in_channels * f_multiplier,
-                    out_channels);
-    ASSERT_MSG_NODE(b_out_channels == out_channels, "OpDepthwiseConv2d: tensor b_out_channels mismatch %d != %d",
-                    b_out_channels, out_channels);
+    ERROR_IF(in_batch != out_batch, "OpDepthwiseConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(f_in_channels != in_channels, "OpDepthwiseConv2d: tensor input channel mismatch %d != %d", f_in_channels,
+             in_channels);
+    ERROR_IF(in_channels * f_multiplier != out_channels, "OpDepthwiseConv2d: tensor output channel mismatch %d != %d",
+             in_channels * f_multiplier, out_channels);
+    ERROR_IF(b_out_channels != out_channels, "OpDepthwiseConv2d: bias channels mismatch %d != %d", b_out_channels,
+             out_channels);
 
     int padding_top    = this->attribute->padding()[0];
     int padding_bottom = this->attribute->padding()[1];
@@ -651,10 +664,11 @@
 }
 
 template <DType InDtype, DType WeightDtype>
-OpFullyConnected<InDtype, WeightDtype>::OpFullyConnected(TosaAttributeBase* attribute_,
+OpFullyConnected<InDtype, WeightDtype>::OpFullyConnected(SubgraphTraverser* sgt_,
+                                                         TosaAttributeBase* attribute_,
                                                          TosaQuantInfoBase* qinfo_,
                                                          uint64_t id_)
-    : GraphNode(Op_FULLY_CONNECTED, id_)
+    : GraphNode(sgt_, Op_FULLY_CONNECTED, id_)
 {
     setRequiredOperands(3, 1);
     setRequiredRank(2);
@@ -738,8 +752,11 @@
 }
 
 template <DType Dtype>
-OpMatMul<Dtype>::OpMatMul(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_MATMUL, id_)
+OpMatMul<Dtype>::OpMatMul(SubgraphTraverser* sgt_,
+                          TosaAttributeBase* attribute_,
+                          TosaQuantInfoBase* qinfo_,
+                          uint64_t id_)
+    : GraphNode(sgt_, Op_MATMUL, id_)
 {
     setRequiredOperands(2, 1);
     setRequiredRank(3);
@@ -866,8 +883,11 @@
 }
 
 template <DType Dtype>
-OpMaxPool2d<Dtype>::OpMaxPool2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_MAX_POOL2D, id_)
+OpMaxPool2d<Dtype>::OpMaxPool2d(SubgraphTraverser* sgt_,
+                                TosaAttributeBase* attribute_,
+                                TosaQuantInfoBase* qinfo_,
+                                uint64_t id_)
+    : GraphNode(sgt_, Op_MAX_POOL2D, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(4);
@@ -936,7 +956,8 @@
     int out_width    = this->out->getShape()[2];
     int out_channels = this->out->getShape()[3];
 
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpMaxPool2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(in_batch != out_batch, "OpMaxPool2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(in_channels != out_channels, "OpMaxPool2d: tensor channel mismatch %d != %d", in_channels, out_channels);
 
     int padding_top    = this->attribute->padding()[0];
     int padding_bottom = this->attribute->padding()[1];
@@ -1004,10 +1025,11 @@
 }
 
 template <DType InDtype, DType OutDtype>
-OpTransposeConv2d<InDtype, OutDtype>::OpTransposeConv2d(TosaAttributeBase* attribute_,
+OpTransposeConv2d<InDtype, OutDtype>::OpTransposeConv2d(SubgraphTraverser* sgt_,
+                                                        TosaAttributeBase* attribute_,
                                                         TosaQuantInfoBase* qinfo_,
                                                         uint64_t id_)
-    : GraphNode(Op_TRANSPOSE_CONV2D, id_)
+    : GraphNode(sgt_, Op_TRANSPOSE_CONV2D, id_)
 {
     setRequiredOperands(3, 1);
     setRequiredRank(4);
@@ -1104,13 +1126,13 @@
     int dilation_h   = this->attribute->dilation()[0];
     int dilation_w   = this->attribute->dilation()[1];
 
-    ASSERT_MSG_NODE(in_batch == out_batch, "OpTransposeConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
-    ASSERT_MSG_NODE(f_in_channels == in_channels, "OpTransposeConv2d: tensor input channel mismatch %d != %d",
-                    f_in_channels, in_channels);
-    ASSERT_MSG_NODE(f_out_channels == out_channels, "OpTransposeConv2d: tensor output channel mismatch %d != %d",
-                    f_out_channels, out_channels);
-    ASSERT_MSG_NODE(b_out_channels == out_channels, "OpDepthwiseConv2d: tensor b_out_channels mismatch %d != %d",
-                    b_out_channels, out_channels);
+    ERROR_IF(in_batch != out_batch, "OpTransposeConv2d: tensor batch mismatch %d != %d", in_batch, out_batch);
+    ERROR_IF(f_in_channels != in_channels, "OpTransposeConv2d: tensor input channel mismatch %d != %d", f_in_channels,
+             in_channels);
+    ERROR_IF(f_out_channels != out_channels, "OpTransposeConv2d: tensor output channel mismatch %d != %d",
+             f_out_channels, out_channels);
+    ERROR_IF(b_out_channels != out_channels, "OpTransposeConv2d: bias channels mismatch %d != %d", b_out_channels,
+             out_channels);
 
     DEBUG_INFO(OP,
                "perform OpTransposeConv2d, input.shape=[%d,%d,%d,%d], weight.shape=[%d,%d,%d,%d], "
diff --git a/reference_model/src/ops/tensor_ops.h b/reference_model/src/ops/tensor_ops.h
index 9aaa140..6ffc27d 100644
--- a/reference_model/src/ops/tensor_ops.h
+++ b/reference_model/src/ops/tensor_ops.h
@@ -28,7 +28,7 @@
 class OpArgMax : public GraphNode
 {
 public:
-    OpArgMax(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpArgMax(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpArgMax();
 
     virtual int checkTensorAttributes();
@@ -49,7 +49,7 @@
 class OpAvgPool2d : public GraphNode
 {
 public:
-    OpAvgPool2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpAvgPool2d(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpAvgPool2d();
 
     virtual int checkTensorAttributes();
@@ -80,7 +80,7 @@
 class OpConv2d : public GraphNode
 {
 public:
-    OpConv2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpConv2d(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpConv2d();
 
     virtual int checkTensorAttributes() final;
@@ -112,7 +112,7 @@
 class OpDepthwiseConv2d : public GraphNode
 {
 public:
-    OpDepthwiseConv2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpDepthwiseConv2d(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpDepthwiseConv2d();
 
     virtual int checkTensorAttributes() final;
@@ -144,7 +144,7 @@
 class OpFullyConnected : public GraphNode
 {
 public:
-    OpFullyConnected(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpFullyConnected(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpFullyConnected();
 
     virtual int checkTensorAttributes() final;
@@ -174,7 +174,7 @@
 class OpMatMul : public GraphNode
 {
 public:
-    OpMatMul(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpMatMul(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpMatMul();
 
     virtual int checkTensorAttributes() final;
@@ -205,7 +205,7 @@
 class OpMaxPool2d : public GraphNode
 {
 public:
-    OpMaxPool2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpMaxPool2d(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpMaxPool2d();
 
     virtual int checkTensorAttributes();
@@ -226,7 +226,7 @@
 class OpTransposeConv2d : public GraphNode
 {
 public:
-    OpTransposeConv2d(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpTransposeConv2d(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpTransposeConv2d();
 
     virtual int checkTensorAttributes() final;
diff --git a/reference_model/src/ops/type_conversion.cc b/reference_model/src/ops/type_conversion.cc
index d988c57..657eebf 100644
--- a/reference_model/src/ops/type_conversion.cc
+++ b/reference_model/src/ops/type_conversion.cc
@@ -23,8 +23,11 @@
 using namespace tosa;
 
 template <int Rank, DType InDtype, DType OutDtype>
-OpRescale<Rank, InDtype, OutDtype>::OpRescale(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_RESCALE, id_)
+OpRescale<Rank, InDtype, OutDtype>::OpRescale(SubgraphTraverser* sgt_,
+                                              TosaAttributeBase* attribute_,
+                                              TosaQuantInfoBase* qinfo_,
+                                              uint64_t id_)
+    : GraphNode(sgt_, Op_RESCALE, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
@@ -101,53 +104,68 @@
         int32_t channel_multiplier, channel_shift;
         Eigen::array<Eigen::Index, 2> begin, size;
         size = Eigen::array<Eigen::Index, 2>({ shape_2d[0], 1 });
-        for (int32_t i = 0; i < shape_2d[1]; i++)
+        try
         {
-            begin                        = Eigen::array<Eigen::Index, 2>({ 0, i });
-            curr_channel_slice_prescaled = input_reshaped.slice(begin, size);
-            channel_multiplier           = multiplier[i];
-            channel_shift                = shift[i];
-            curr_channel_slice_postscaled =
-                curr_channel_slice_prescaled.unaryExpr([input_zp, output_zp, channel_multiplier, channel_shift,
-                                                        double_round, scale32](InEigenType in_val) -> OutEigenType {
-                    InEigenType input_zp_shifted = in_val - (InEigenType)input_zp;
-                    int32_t scaled;
-                    if (scale32)
-                        scaled = TosaReference::QuantUtil::apply_scale_32(input_zp_shifted, channel_multiplier,
-                                                                          channel_shift, double_round);
-                    else
-                        scaled = TosaReference::QuantUtil::apply_scale_16(input_zp_shifted, channel_multiplier,
-                                                                          channel_shift);
-                    OutEigenType out_val = (OutEigenType)(scaled + output_zp);
-                    out_val              = std::max<OutEigenType>(out_val, QMin);
-                    out_val              = std::min<OutEigenType>(out_val, QMax);
-                    return out_val;
-                });
-
-            for (int32_t j = 0; j < shape_2d[0]; j++)
+            for (int32_t i = 0; i < shape_2d[1]; i++)
             {
-                output_2d(j, i) = curr_channel_slice_postscaled(j, 0);
+                begin                        = Eigen::array<Eigen::Index, 2>({ 0, i });
+                curr_channel_slice_prescaled = input_reshaped.slice(begin, size);
+                channel_multiplier           = multiplier[i];
+                channel_shift                = shift[i];
+                curr_channel_slice_postscaled =
+                    curr_channel_slice_prescaled.unaryExpr([input_zp, output_zp, channel_multiplier, channel_shift,
+                                                            double_round, scale32](InEigenType in_val) -> OutEigenType {
+                        InEigenType input_zp_shifted = in_val - (InEigenType)input_zp;
+                        int32_t scaled;
+                        if (scale32)
+                            scaled = TosaReference::QuantUtil::apply_scale_32(input_zp_shifted, channel_multiplier,
+                                                                              channel_shift, double_round);
+                        else
+                            scaled = TosaReference::QuantUtil::apply_scale_16(input_zp_shifted, channel_multiplier,
+                                                                              channel_shift);
+                        OutEigenType out_val = (OutEigenType)(scaled + output_zp);
+                        out_val              = std::max<OutEigenType>(out_val, QMin);
+                        out_val              = std::min<OutEigenType>(out_val, QMax);
+                        return out_val;
+                    });
+
+                for (int32_t j = 0; j < shape_2d[0]; j++)
+                {
+                    output_2d(j, i) = curr_channel_slice_postscaled(j, 0);
+                }
             }
         }
+        catch (std::string desc)
+        {
+            REQUIRE(false, "OpRescale apply_scale_32/16() fails: %s.", desc.c_str());
+        }
     }
     else
     {
         int32_t tensor_multiplier = multiplier[0];
         int32_t tensor_shift      = shift[0];
-        output_2d = input_reshaped.unaryExpr([input_zp, output_zp, tensor_multiplier, tensor_shift, double_round,
-                                              scale32](InEigenType in_val) -> OutEigenType {
-            InEigenType input_zp_shifted = in_val - (InEigenType)input_zp;
-            int32_t scaled;
-            if (scale32)
-                scaled = TosaReference::QuantUtil::apply_scale_32(input_zp_shifted, tensor_multiplier, tensor_shift,
-                                                                  double_round);
-            else
-                scaled = TosaReference::QuantUtil::apply_scale_16(input_zp_shifted, tensor_multiplier, tensor_shift);
-            OutEigenType out_val = (OutEigenType)(scaled + output_zp);
-            out_val              = std::max<OutEigenType>(out_val, QMin);
-            out_val              = std::min<OutEigenType>(out_val, QMax);
-            return out_val;
-        });
+        try
+        {
+            output_2d = input_reshaped.unaryExpr([input_zp, output_zp, tensor_multiplier, tensor_shift, double_round,
+                                                  scale32](InEigenType in_val) -> OutEigenType {
+                InEigenType input_zp_shifted = in_val - (InEigenType)input_zp;
+                int32_t scaled;
+                if (scale32)
+                    scaled = TosaReference::QuantUtil::apply_scale_32(input_zp_shifted, tensor_multiplier, tensor_shift,
+                                                                      double_round);
+                else
+                    scaled =
+                        TosaReference::QuantUtil::apply_scale_16(input_zp_shifted, tensor_multiplier, tensor_shift);
+                OutEigenType out_val = (OutEigenType)(scaled + output_zp);
+                out_val              = std::max<OutEigenType>(out_val, QMin);
+                out_val              = std::min<OutEigenType>(out_val, QMax);
+                return out_val;
+            });
+        }
+        catch (std::string desc)
+        {
+            REQUIRE(false, "OpRescale apply_scale_32/16() fails: %s.", desc.c_str());
+        }
     }
 
     // reshape [d0 * d1 ..., dn] back to [d0, d1, ..., dn]
@@ -162,8 +180,11 @@
 }
 
 template <int Rank, DType InDtype, DType OutDtype>
-OpCast<Rank, InDtype, OutDtype>::OpCast(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_)
-    : GraphNode(Op_CAST, id_)
+OpCast<Rank, InDtype, OutDtype>::OpCast(SubgraphTraverser* sgt_,
+                                        TosaAttributeBase* attribute_,
+                                        TosaQuantInfoBase* qinfo_,
+                                        uint64_t id_)
+    : GraphNode(sgt_, Op_CAST, id_)
 {
     setRequiredOperands(1, 1);
     setRequiredRank(0, 6);
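
Stripped of the Eigen slicing above, the per-channel path of OpRescale boils down to the loop sketched below, using plain flattened buffers and a simplified stand-in for apply_scale (names, types and rounding mode are illustrative, not the reference implementation):

    // Per-channel rescale, illustrative only (no Eigen, double-rounding omitted).
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    void rescale_per_channel_sketch(const std::vector<int32_t>& in,        // [rows, channels], row-major
                                    std::vector<int32_t>& out,
                                    int rows, int channels,
                                    const std::vector<int32_t>& multiplier, // one per channel
                                    const std::vector<int32_t>& shift,      // one per channel
                                    int32_t input_zp, int32_t output_zp,
                                    int32_t qmin, int32_t qmax)
    {
        out.resize(in.size());
        for (int c = 0; c < channels; c++)
        {
            for (int r = 0; r < rows; r++)
            {
                int64_t v = int64_t(in[r * channels + c]) - input_zp;
                // stand-in for QuantUtil::apply_scale_32/16()
                int64_t round  = shift[c] > 0 ? (int64_t(1) << (shift[c] - 1)) : 0;
                int64_t scaled = (v * multiplier[c] + round) >> shift[c];
                int32_t o      = static_cast<int32_t>(scaled) + output_zp;
                out[r * channels + c] = std::min(std::max(o, qmin), qmax);
            }
        }
    }
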
diff --git a/reference_model/src/ops/type_conversion.h b/reference_model/src/ops/type_conversion.h
index 6ec4d6d..060e14e 100644
--- a/reference_model/src/ops/type_conversion.h
+++ b/reference_model/src/ops/type_conversion.h
@@ -26,7 +26,7 @@
 class OpRescale : public GraphNode
 {
 public:
-    OpRescale(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpRescale(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpRescale();
 
     virtual int checkTensorAttributes() final;
@@ -140,7 +140,7 @@
 class OpCast : public GraphNode
 {
 public:
-    OpCast(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
+    OpCast(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_);
     virtual ~OpCast();
 
     virtual int checkTensorAttributes() final;