IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate

 * Updated all tests to use the new DelegateTestInterpreter.
 * Fixed some unit tests where the tensor shape was incorrect.
 * Added the file identifier to the FlatBufferBuilder Finish call, as it
   is required for validation when creating the model using the new API
   (see the sketch below).
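
A minimal sketch of why the identifier is needed, using the helpers that
flatc generates from the TfLite schema (tflite::ModelIdentifier() and
tflite::VerifyModelBuffer() come from schema_generated.h;
armnnDelegate::FILE_IDENTIFIER is assumed to carry the same "TFL3"
value). This snippet is illustrative only and is not part of the change:

    #include <flatbuffers/flatbuffers.h>
    #include <schema_generated.h>
    #include <tensorflow/lite/version.h>

    int main()
    {
        flatbuffers::FlatBufferBuilder fbb;
        auto model = tflite::CreateModel(fbb, TFLITE_SCHEMA_VERSION);
        // Finish() with an identifier stamps "TFL3" into the buffer
        // header; without it the generated verifier rejects the model.
        fbb.Finish(model, tflite::ModelIdentifier());
        flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
        return tflite::VerifyModelBuffer(verifier) ? 0 : 1;
    }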

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
diff --git a/delegate/test/TransposeTestHelper.hpp b/delegate/test/TransposeTestHelper.hpp
index 99bb60b..57f4e29 100644
--- a/delegate/test/TransposeTestHelper.hpp
+++ b/delegate/test/TransposeTestHelper.hpp
@@ -5,15 +5,17 @@
 
 #pragma once
 
+#include "TestUtils.hpp"
+
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -76,102 +78,51 @@
                         flatBufferBuilder.CreateVector(&subgraph, 1),
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers, 4));
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+template <typename T>
+void TransposeTest(std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& inputShape,
+                   std::vector<int32_t>& inputPermVecShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& inputValues,
+                   std::vector<int32_t>& inputPermVec,
+                   std::vector<T>& expectedOutputValues)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
-    // set test input data
-    std::vector<int32_t> input0Shape {4, 2, 3};
-    std::vector<int32_t> inputPermVecShape {3};
-    std::vector<int32_t> outputShape {2, 3, 4};
-
-    std::vector<float> input0Values = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
-                                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
-    std::vector<int32_t> inputPermVec = {2, 0, 1};
-    std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
-                                               13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
-
-    // create model
+    // Create model
     std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
-                                                               input0Shape,
+                                                               inputShape,
                                                                inputPermVecShape,
                                                                outputShape,
                                                                inputPermVec);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just the TfLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with the Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data for tflite
-    auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
-    auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        tfLiteInterpreterInput0Data[i] = input0Values[i];
-    }
-
-    auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
-    auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
-    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
-    {
-        tfLiteInterpreterInput1Data[i] = inputPermVec[i];
-    }
-
-    //Set input data for armnn delegate
-    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        armnnDelegateInput0Data[i] = input0Values[i];
-    }
-
-    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
-    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
-    {
-        armnnDelegateInput1Data[i] = inputPermVec[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
-    {
-        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
-    }
-
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 }