IVGCVSW-1929: Fix defect where QASYM8 outputs had no scale set

* The Optimize function now takes an optional vector of strings
  into which error/warning messages are placed (see the usage
  sketch below).
* Optimize now checks the outputs of each layer. If an output
  is Quantized 8 bit but its scale has not been set, an error
  message is added for that output.
* A unit test modelled on the defect report was added to exercise the fix.
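* Usage sketch of the new overload (hypothetical caller: net,
  backends and runtime stand for an already built INetwork,
  backend list and IRuntime, mirroring the new unit test below):

      std::vector<std::string> errMessages;
      armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(
          *net, backends, runtime->GetDeviceSpec(),
          armnn::OptimizerOptions(), errMessages);
      if (!optNet)
      {
          // Every message recorded during optimization is now
          // visible to the caller, not only the boost log.
          for (const std::string& msg : errMessages)
          {
              std::cerr << msg << std::endl;
          }
      }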

!android-nn-driver:152483

Change-Id: If9901f5324a516f1ab62858266b38f98dae16201
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 8e6f490..51490e3 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -69,10 +69,35 @@
     return m_Graph->SerializeToDot(stream);
 }
 
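+// Returns false if any QuantisedAsymm8 output of the given layer has been
+// left with its quantization scale unset (0.0f), recording a message for
+// each such output in errMessages when it is provided.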
+bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
+{
+    bool noErrors = true;
+    unsigned int numOutputs = layer->GetNumOutputSlots();
+    for (unsigned int i = 0; i < numOutputs; i++)
+    {
+        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
+        const TensorInfo& info = outputSlot.GetTensorInfo();
+        if (DataType::QuantisedAsymm8 == info.GetDataType())
+        {
+            if (0.f == info.GetQuantizationScale())
+            {
+                noErrors = false;
+                std::stringstream ss;
+                ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
+                   << " (" << layer->GetNameStr() << ") is of type"
+                   << " Quantized 8 bit but its scale parameter has not been set";
+                BOOST_LOG_TRIVIAL(warning) << ss.str();
+                if (errMessages)
+                {
+                    errMessages.value().push_back(ss.str());
+                }
+            }
+        }
+    }
+    return noErrors;
+}
+
 IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                               const std::vector<armnn::Compute>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
-                              const OptimizerOptions& options)
+                              const OptimizerOptions& options,
+                              Optional<std::vector<std::string>&> errMessages)
 {
     if (backendPreferences.empty()) {
         throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
@@ -123,24 +148,41 @@
         }
     }
     if (availablePreferredBackends.empty()) {
-        BOOST_LOG_TRIVIAL(warning) << "None of the preferred backends " << backendPreferences
-                                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
-        return {nullptr, &IOptimizedNetwork::Destroy};
+        std::stringstream failureMsg;
+        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
+                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
+        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
+        if (errMessages)
+        {
+            errMessages.value().push_back(failureMsg.str());
+        }
+        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
     }
 
     auto ReturnWithError = [&](Layer* layer)
     {
-        BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
-                    << " is not supported on any preferred backend " << backendPreferences;
+        std::stringstream failureMsg;
+        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
+                   << " is not supported on any preferred backend " << backendPreferences;
+        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
+        if (errMessages)
+        {
+            errMessages.value().push_back(failureMsg.str());
+        }
         return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
     };
 
     // Assign a compute device for all nodes
+    bool bErrorFound = false;
     for (auto&& layer : optNetObjPtr->GetGraph())
     {
         DataType dataType = layer->GetDataType();
         std::string reasonIfUnsupported;
         bool found = false;
+        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
+        {
+            // Don't abort immediately; find all the quantized outputs
+            // whose scale has not been set and report them all back.
+            bErrorFound = true;
+        }
         for (const armnn::Compute& backend : availablePreferredBackends)
         {
             // need to set the compute device on the layer
@@ -216,11 +258,16 @@
                         break;
                     }
                 }
-                BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
-                                           << " is not supported on requested backend " << layer->GetComputeDevice()
-                                           << " for data type " << GetDataTypeName(dataType)
-                                           << " (reason: " << reasonIfUnsupported
-                                           << "), falling back to the next backend.";
+                std::stringstream warningMsg;
+                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
+                           << " is not supported on requested backend " << layer->GetComputeDevice()
+                           << " for data type " << GetDataTypeName(dataType)
+                           << " (reason: " << reasonIfUnsupported
+                           << "), falling back to the next backend.";
+                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
+                if (errMessages)
+                {
+                    errMessages.value().push_back(warningMsg.str());
+                }
             }
             else
             {
@@ -248,6 +295,10 @@
             }
         }
     }
+    if (bErrorFound)
+    {
+        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
+    }
 
     Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                 OptimizeInverseConversionsFp32()));
@@ -261,6 +312,7 @@
     return optNet;
 }
 
+
 Network::Network()
 : m_Graph(std::make_unique<Graph>())
 {
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index c342f22..f131946 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -981,8 +981,8 @@
     armnn::OptimizerOptions optimizerOptions;
     optimizerOptions.m_ReduceFp32ToFp16 = true;
 
-    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
-                                                               optimizerOptions);
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+            net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
     const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
 
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 0c896d8..0237387 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -485,4 +485,60 @@
     BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
+BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
+{
+    // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
+    using namespace armnn;
+
+    // Create runtime in which test will run
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    // Build up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+    armnn::IConnectableLayer* input = net->AddInputLayer(0, "input");
+    armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(armnn::SoftmaxDescriptor(), "softmax");
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
+    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
+            armnn::TensorShape({ 1, 5 }),
+            armnn::DataType::QuantisedAsymm8,
+            1.0f/255,
+            0
+    ));
+
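+    // Deliberately leave the quantization scale unset on the softmax
+    // output; Optimize should report exactly one error for it.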
+    softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
+            armnn::TensorShape({ 1, 5 }),
+            armnn::DataType::QuantisedAsymm8
+    ));
+
+    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<std::string> errMessages;
+    armnn::IOptimizedNetworkPtr optNet = Optimize(
+            *net,
+            backends,
+            runtime->GetDeviceSpec(),
+            OptimizerOptions(),
+            errMessages
+    );
+
+    BOOST_TEST(errMessages.size() == 1);
+    BOOST_TEST(errMessages[0] ==
+        "ERROR: output 0 of layer Softmax (softmax) is of type "
+        "Quantized 8 bit but its scale parameter has not been set");
+    BOOST_TEST(!optNet);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 89b823a..c502ad9 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -101,7 +101,8 @@
 
         armnn::INetworkPtr network =
             m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
-        auto optimized = Optimize(*network, { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
+        auto optimized = Optimize(*network,
+                { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
         armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
         if (ret != armnn::Status::Success)
         {
@@ -122,7 +123,8 @@
 
         armnn::INetworkPtr network =
             m_Parser->CreateNetworkFromString(m_Prototext.c_str());
-        auto optimized = Optimize(*network, { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
+        auto optimized = Optimize(*network,
+                { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
         armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
         if (ret != armnn::Status::Success)
         {