IVGCVSW-4262 Add Calls to IsReshapeSupported and IsPermuteSupported

!armnn:2486

* Add calls to IsReshapeSupported and IsPermuteSupported before adding these layers in ConvertConcatenation
* Add outputInfo parameter wherever needed for IsReshapeSupported

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: Ic5d142ea046161960ff2fc137bd261ebb4e6ac0c
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index e6f8acb..2cb8497 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -702,6 +702,7 @@
                                data.m_Backends,
                                isSupported,
                                input.GetTensorInfo(),
+                               outputInfo,
                                reshapeDescriptor);
 
     if (!isSupported)
@@ -1299,7 +1300,7 @@
 
     IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
     assert(layer != nullptr);
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    bool isReshapeSupported = BroadcastTensor(input0, input1, outInfo, layer, data);
     if (!isReshapeSupported)
     {
         return false;
@@ -1354,7 +1355,7 @@
 
     IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
     assert(layer != nullptr);
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, layer, data);
     if (!isReshapeSupported)
     {
         return false;
@@ -1517,7 +1518,7 @@
         return Fail("%s: AddPreluLayer failed", __func__);
     }
 
-    bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
+    bool isReshapeSupported = BroadcastTensor(input, alpha, outputInfo, layer, data);
     if (!isReshapeSupported)
     {
         return false;