IVGCVSW-1961: Add converter method for SQUEEZE to V1.1 section of HalPolicy

Change-Id: I15dffef32d394b13e57df134000b7dca4b8788af
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index a94f305..1b1c06e 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -33,6 +33,8 @@
                 return ConvertMean(operation, model, data);
             case V1_1::OperationType::PAD:
                 return ConvertPad(operation, model, data);
+            case V1_1::OperationType::SQUEEZE:
+                return ConvertSqueeze(operation, model, data);
             default:
                 return Fail("%s: Operation type %s not supported in ArmnnDriver",
                             __func__, toString(operation.type).c_str());
@@ -272,5 +274,89 @@
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
 }
 
+// Converts an Android NN SQUEEZE operation into an ArmNN Reshape layer.
+// Input 0 is the tensor to squeeze; optional input 1 lists the axes whose
+// size-1 dimensions should be removed (all axes are candidates when absent).
+bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        // Must return here: continuing with an unsupported rank would read
+        // past dimensionSequence and index GetShape() out of bounds below.
+        return Fail("%s: Inputs with rank greater than 4 are not supported, input rank: %i", __func__, rank);
+    }
+
+    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
+
+    std::vector<int32_t> axis;
+    if (!axisOperand)
+    {
+        // No axis operand: every size-1 dimension is a squeeze candidate.
+        axis.assign(dimensionSequence,
+                    dimensionSequence + rank);
+    }
+    else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
+    }
+
+    // NOTE(review): negative axis values are not normalised here - confirm
+    // whether the NNAPI spec requires supporting them for SQUEEZE.
+    // Build the output shape: keep every dimension that is either not listed
+    // in axis or whose extent is not 1.
+    std::vector<uint32_t> outputDims;
+    for (unsigned int i = 0; i < rank; i++)
+    {
+        bool skipSqueeze = (std::find(axis.begin(), axis.end(), static_cast<int32_t>(i)) == axis.end());
+        auto currentDimension = inputInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    armnn::TensorShape outShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
+
+    armnn::TensorInfo outputInfo = inputInfo;
+    outputInfo.SetShape(outShape);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputInfo.GetShape();
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsReshapeSupported,
+                          data.m_Compute,
+                          inputInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
 } // namespace hal_1_1
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index a918910..06cc574 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -29,6 +29,7 @@
     static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
+    static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
 };
 
 } // namespace hal_1_1
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index a812183..165c63b 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -464,11 +464,15 @@
 using namespace android::nn;
 
 template<typename HalOperation, typename HalModel>
-const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model)
+const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model,
+                               bool failOnIndexOutOfBounds = true)
 {
     if (inputIndex >= operation.inputs.size())
     {
-        Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
+        if (failOnIndexOutOfBounds)
+        {
+            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
+        }
         return nullptr;
     }
 
@@ -1036,4 +1040,4 @@
     }
 }
 
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index da0dc5c..a47edf5 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -31,6 +31,7 @@
 RESHAPE                      (FLOAT32,QUANT8_ASYMM)
 RESIZE_BILINEAR              (FLOAT32)
 SOFTMAX                      (FLOAT32,QUANT8_ASYMM)
+SQUEEZE                      (FLOAT32,QUANT8_ASYMM)
 TANH                         (FLOAT32)
 LSTM                         (FLOAT32)
 
@@ -58,7 +59,6 @@
 MEAN
 PAD
 SPACE_TO_BATCH_ND
-SQUEEZE
 STRIDED_SLICE
 SUB
 TRANSPOSE