COMPMID-2225: Add interface support for new quantized data types.

Add support for (illustrative sketch below):
- QSYMM8: 8-bit quantized symmetric
- QSYMM8_PER_CHANNEL: 8-bit quantized symmetric with per-channel quantization
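
For illustration only (not part of this patch), tensor metadata using the new types could be set up through the existing TensorInfo/QuantizationInfo interface roughly as in the sketch below. The per-tensor (scale, offset) constructor matches the usage in the diff; the per-channel QuantizationInfo constructor taking a std::vector<float> of scales is an assumption based on the description above.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include <vector>

    using namespace arm_compute;

    // Per-tensor symmetric quantization: a single scale, implicit zero offset.
    const TensorInfo qsymm8_info(TensorShape(32U, 32U, 16U), 1, DataType::QSYMM8,
                                 QuantizationInfo(0.05f));

    // Per-channel symmetric quantization: one scale per channel
    // (assumed std::vector<float> overload of QuantizationInfo).
    const std::vector<float> channel_scales{ 0.02f, 0.03f, 0.05f };
    const TensorInfo qsymm8_pc_info(TensorShape(3U, 3U, 3U), 1, DataType::QSYMM8_PER_CHANNEL,
                                    QuantizationInfo(channel_scales));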

Change-Id: I00c4ff98e44af37419470af61419ee95d0de2463
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1236
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validate_examples/graph_depthwiseconvolution.cpp b/tests/validate_examples/graph_depthwiseconvolution.cpp
index 1f5627a..3ea33e1 100644
--- a/tests/validate_examples/graph_depthwiseconvolution.cpp
+++ b/tests/validate_examples/graph_depthwiseconvolution.cpp
@@ -158,29 +158,26 @@
      */
     void consume_parameters(ExampleParams &common_params)
     {
-        common_params.input.width             = width->value();
-        common_params.input.height            = height->value();
-        common_params.input.fm                = channels->value();
-        common_params.input.batch             = batch->value();
-        common_params.input.quant_info.scale  = scale->value();
-        common_params.input.quant_info.offset = offset->value();
-        common_params.input.npy               = input_npy->value();
-        common_params.input.range_low         = input_range_low->value();
-        common_params.input.range_high        = input_range_high->value();
+        common_params.input.width      = width->value();
+        common_params.input.height     = height->value();
+        common_params.input.fm         = channels->value();
+        common_params.input.batch      = batch->value();
+        common_params.input.quant_info = QuantizationInfo(scale->value(), offset->value());
+        common_params.input.npy        = input_npy->value();
+        common_params.input.range_low  = input_range_low->value();
+        common_params.input.range_high = input_range_high->value();
 
-        common_params.weights.width             = weights_width->value();
-        common_params.weights.height            = weights_height->value();
-        common_params.weights.npy               = weights_npy->value();
-        common_params.weights.range_low         = weights_range_low->value();
-        common_params.weights.range_high        = weights_range_high->value();
-        common_params.weights.quant_info.scale  = weights_scale->value();
-        common_params.weights.quant_info.offset = weights_offset->value();
+        common_params.weights.width      = weights_width->value();
+        common_params.weights.height     = weights_height->value();
+        common_params.weights.npy        = weights_npy->value();
+        common_params.weights.range_low  = weights_range_low->value();
+        common_params.weights.range_high = weights_range_high->value();
+        common_params.weights.quant_info = QuantizationInfo(weights_scale->value(), weights_offset->value());
 
         common_params.bias.npy = bias_npy->value();
 
-        common_params.output.quant_info.scale  = output_scale->value();
-        common_params.output.quant_info.offset = output_offset->value();
-        common_params.output.npy               = output_npy->value();
+        common_params.output.quant_info = QuantizationInfo(output_scale->value(), output_offset->value());
+        common_params.output.npy        = output_npy->value();
 
         common_params.convolution.padding_mode     = padding_mode->value();
         common_params.convolution.padding_top      = padding_top->value();