IVGCVSW-7605 IVGCVSW-7604 Implement Squeeze and ExpandDims operators for Classic and Opaque Delegate

 * Implemented the previously unsupported Squeeze and ExpandDims operators in the Classic Delegate.
 * Added unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib39eeea53c114b15943e8dc2e796ce64c40cb3a5
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
index 80631cc..af9b446 100644
--- a/delegate/test/RedefineTestHelper.hpp
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -21,7 +21,7 @@
 namespace
 {
 
-std::vector<char> CreateRedefineTfLiteModel(
+std::vector<char> CreateReshapeTfLiteModel(
         tflite::BuiltinOperator redefineOperatorCode,
         tflite::TensorType tensorType,
         const std::vector<int32_t>& inputTensorShape,
@@ -141,6 +141,127 @@
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
+std::vector<char> CreateRedefineTfLiteModel(
+        tflite::BuiltinOperator redefineOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector<int32_t>& inputTensorShape,
+        const std::vector<int32_t>& outputTensorShape,
+        const std::vector<int32_t>& squeezeOrAxisData,
+        float quantScale = 1.0f,
+        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors;
+    std::vector<int32_t> operatorInputs;
+    std::vector<int> subgraphInputs;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_SqueezeOptions;
+
+    if (redefineOperatorCode == tflite::BuiltinOperator_SQUEEZE)
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         2,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+        tensors = { inputTensor, outputTensor};
+        operatorInputs = {0};
+        subgraphInputs = {0};
+        operatorBuiltinOptions =
+                CreateSqueezeOptions(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector(squeezeOrAxisData.data(),
+                                                                    squeezeOrAxisData.size())).Union();
+
+        operatorBuiltinOptionsType = BuiltinOptions_SqueezeOptions;
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_EXPAND_DIMS)
+    {
+        buffers.push_back(
+                CreateBuffer(flatBufferBuilder,
+                             flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(squeezeOrAxisData.data()),
+                                                            sizeof(int32_t) * squeezeOrAxisData.size())));
+        auto shapeTensor = CreateTensor(flatBufferBuilder,
+                                        flatBufferBuilder.CreateVector<int32_t>( { 1 } ),
+                                        tflite::TensorType_INT32,
+                                        2,
+                                        flatBufferBuilder.CreateString("axis"));
+
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         3,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+
+        tensors = { inputTensor, outputTensor, shapeTensor };
+        operatorInputs = {0, 2};
+        subgraphInputs = {0, 2};
+        operatorBuiltinOptions = CreateExpandDimsOptions(flatBufferBuilder).Union();
+
+        operatorBuiltinOptionsType = BuiltinOptions_ExpandDimsOptions;
+    }
+
+    const std::vector<int32_t> operatorOutputs{1};
+    flatbuffers::Offset <Operator> redefineOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{1};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&redefineOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Redefine Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         redefineOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
 template <typename T>
 void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                   tflite::TensorType tensorType,
@@ -149,20 +270,45 @@
                   std::vector<int32_t>& outputShape,
                   std::vector<T>& inputValues,
                   std::vector<T>& expectedOutputValues,
-                  std::vector<int32_t>& targetShape,
+                  std::vector<int32_t>& additionalData,
                   bool useOption = true,
                   float quantScale = 1.0f,
                   int quantOffset  = 0)
 {
     using namespace delegateTestInterpreter;
-    std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
-                                                              tensorType,
-                                                              inputShape,
-                                                              outputShape,
-                                                              targetShape,
-                                                              useOption,
-                                                              quantScale,
-                                                              quantOffset);
+
+    std::vector<char> modelBuffer;
+    if (redefineOperatorCode == tflite::BuiltinOperator_EXPAND_DIMS)
+    {
+        modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                tensorType,
+                                                inputShape,
+                                                outputShape,
+                                                additionalData,
+                                                quantScale,
+                                                quantOffset);
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_RESHAPE)
+    {
+        modelBuffer = CreateReshapeTfLiteModel(redefineOperatorCode,
+                                               tensorType,
+                                               inputShape,
+                                               outputShape,
+                                               additionalData,
+                                               useOption,
+                                               quantScale,
+                                               quantOffset);
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_SQUEEZE)
+    {
+        modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                tensorType,
+                                                inputShape,
+                                                outputShape,
+                                                additionalData,
+                                                quantScale,
+                                                quantOffset);
+    }
 
     // Setup interpreter with just TFLite Runtime.
     auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);