Fix quantizer crash when quantizing a zero-filled tensor

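Quantizing a tensor that contains only zeros leaves min == max == 0.0
even after the range is widened to include zero. The quantization
scheme then computes scale = (max - min) / highest == 0.0 and divides
by that scale to derive the offset, so the quantizer crashes (or
produces inf/NaN parameters). Forcing max to 1.0 in that degenerate
case keeps the scale finite; a zero-filled tensor still quantizes
every value to zero.

A minimal, self-contained sketch of the failure and the guard (plain
C++ mirroring the asymmetric scheme code; the 8-bit "highest" value is
an assumption for illustration, not the ArmNN implementation):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        // A zero-filled tensor reports min == max == 0.
        double min = 0.0;
        double max = 0.0;
        const double highest = (1 << 8) - 1; // assumed 8-bit range [0, 255]

        min = std::min(0.0, min); // min <= 0.0
        max = std::max(0.0, max); // max >= 0.0

        // Without this guard, scale == 0 and the offset divides by zero.
        if (min == 0.0 && max == 0.0)
        {
            max = 1.0;
        }

        double scale  = (max - min) / highest;
        double offset = -min / scale;
        std::printf("scale=%f offset=%f\n", scale, offset);
        return 0;
    }
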
Signed-off-by: Jung Tae-young <tee.ty.jung@openedges.com>
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Change-Id: I1f0dfa4ca76e1c85a2b8fb5de12039a260224951
diff --git a/src/armnn/NetworkQuantizationScheme.hpp b/src/armnn/NetworkQuantizationScheme.hpp
index a5f96a1..0effa1f 100644
--- a/src/armnn/NetworkQuantizationScheme.hpp
+++ b/src/armnn/NetworkQuantizationScheme.hpp
@@ -40,6 +40,12 @@
         min = std::min(0.0, min); // min <= 0.0
         max = std::max(0.0, max); // max >= 0.0
 
+        // Avoid dividing by zero when quantizing a zero-filled tensor
+        if (min == 0.0 && max == 0.0)
+        {
+            max = 1.0;
+        }
+
         // Assumes quantization range [0-highest]
         double scale = (max-min) / highest;
         double offset = -min / scale;
@@ -64,6 +70,12 @@
             throw InvalidArgumentException("min > max will result in invalid quantization.");
         }
 
+        // Avoid dividing by zero when quantizing a zero-filled tensor
+        if (min == 0.0 && max == 0.0)
+        {
+            max = 1.0;
+        }
+
         double highest = (1 << (NumBits()-1)) - 1; // (numbits-1) accounts for the sign bit
 
         double extent = std::max(std::abs(min), std::abs(max));