IVGCVSW-1929: Fix for QASYM8 outputs with no quantization scale set

* The Optimize function now accepts an optional vector of strings
  into which error/warning messages are placed.
* The Optimize function now checks the outputs of each layer. If an
  output is Quantized 8 bit but its scale has not been set, an error
  message is added for each such output.
* Unit test modelled on defect report added to exercise the fix.

!android-nn-driver:152483

Change-Id: If9901f5324a516f1ab62858266b38f98dae16201
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 0c896d8..0237387 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -485,4 +485,62 @@
     BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
+BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
+{
+    // Regression test for IVGCVSW-1929: Optimize() must report an error for a
+    // QASYM8 output tensor whose scale has not been set, not silently accept it.
+    using namespace armnn;
+
+    // Create runtime in which test will run
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    // Build the structure of the network: input -> softmax -> output
+    INetworkPtr net(INetwork::Create());
+    armnn::IConnectableLayer* input = net->AddInputLayer(
+            0,
+            "input"
+    );
+    armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(
+            armnn::SoftmaxDescriptor(),
+            "softmax"
+    );
+    armnn::IConnectableLayer* output = net->AddOutputLayer(
+            0,
+            "output"
+    );
+
+    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
+    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
+            armnn::TensorShape({ 1, 5 }),
+            armnn::DataType::QuantisedAsymm8,
+            1.0f/255, // quantization scale is set on the input tensor
+            0
+    ));
+
+    // Deliberately leave the softmax output without a quantization scale.
+    softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
+            armnn::TensorShape({ 1, 5 }),
+            armnn::DataType::QuantisedAsymm8
+    ));
+
+    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<std::string> errMessages;
+    armnn::IOptimizedNetworkPtr optNet = Optimize(
+            *net,
+            backends,
+            runtime->GetDeviceSpec(),
+            OptimizerOptions(),
+            errMessages
+    );
+
+    BOOST_TEST(errMessages.size() == 1);
+    BOOST_TEST(errMessages[0] ==
+        "ERROR: output 0 of layer Softmax (softmax) is of type "
+        "Quantized 8 bit but its scale parameter has not been set");
+    BOOST_TEST(!optNet);
+}
+
 BOOST_AUTO_TEST_SUITE_END()