Add ResizeBilinear parser to the TfLite parser
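
Register the RESIZE_BILINEAR builtin operator and implement
TfLiteParser::ParseResizeBilinear. The parser reads the target height and
width from the operator's second input (a constant size tensor), fills a
ResizeBilinearDescriptor with NHWC data layout, and adds the corresponding
ResizeBilinear layer to the network. A unit test upscaling a 1x3x3x1 input
to 1x5x5x1 is added in src/armnnTfLiteParser/test/ResizeBilinear.cpp.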

Change-Id: Id35db981b38348e5a941cfbb4cbdfe8cd617a254
Signed-off-by: Bruno Goncalves <bruno.slackware@gmail.com>
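---
Not part of the change itself: a minimal usage sketch for reviewers showing
how the new operator support is reached through the public parser/runtime
API. "model.tflite" is a placeholder path for any flatbuffer model that
contains a RESIZE_BILINEAR op.

    #include <armnnTfLiteParser/ITfLiteParser.hpp>
    #include <armnn/ArmNN.hpp>
    #include <utility>

    int main()
    {
        // Parsing a model with a RESIZE_BILINEAR op now goes through
        // TfLiteParser::ParseResizeBilinear instead of failing as unsupported.
        auto parser = armnnTfLiteParser::ITfLiteParser::Create();
        armnn::INetworkPtr network =
            parser->CreateNetworkFromBinaryFile("model.tflite"); // placeholder path

        // Optimize for the reference backend and load it into a runtime.
        armnn::IRuntime::CreationOptions options;
        armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
        armnn::IOptimizedNetworkPtr optNet =
            armnn::Optimize(*network, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());

        armnn::NetworkId networkId;
        runtime->LoadNetwork(networkId, std::move(optNet));
        return 0;
    }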
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 7ea85bb..4acd308 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -429,6 +429,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_RELU]              =  &TfLiteParser::ParseRelu;
     m_ParserFunctions[tflite::BuiltinOperator_RELU6]             =  &TfLiteParser::ParseRelu6;
     m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           =  &TfLiteParser::ParseReshape;
+    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   =  &TfLiteParser::ParseResizeBilinear;
     m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
     m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
     m_ParserFunctions[tflite::BuiltinOperator_SUB]               =  &TfLiteParser::ParseSub;
@@ -1359,6 +1360,43 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
+
+    // Data for the parsed tensor args (size) must be stored locally.
+    std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
+
+    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+    ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
+
+    ResizeBilinearDescriptor desc;
+    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
+    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
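+    // Register only the first input (the image tensor); the second input is the constant size tensor read above.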
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 7d8151d..5999232 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -102,6 +102,7 @@
     void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
     void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
     void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
+    void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
     void ParseSub(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/ResizeBilinear.cpp b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
new file mode 100644
index 0000000..400dc78
--- /dev/null
+++ b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+using armnnTfLiteParser::TfLiteParser;
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct ResizeBilinearFixture : public ParserFlatbuffersFixture
+{
+    explicit ResizeBilinearFixture(const std::string & inputShape,
+                                   const std::string & outputShape,
+                                   const std::string & sizeShape,
+                                   const std::string & sizeData)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "RESIZE_BILINEAR" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + sizeShape + R"( ,
+                            "type": "INT32",
+                            "buffer": 0,
+                            "name": "sizeTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + inputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "InputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"( ,
+                            "type": "FLOAT32",
+                            "buffer": 2,
+                            "name": "OutputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                "inputs": [ 1 ],
+                "outputs": [ 2 ],
+                "operators": [
+                    {
+                        "opcode_index": 0,
+                        "inputs": [ 1, 0 ],
+                        "outputs": [ 2 ],
+                        "builtin_options_type": "ResizeBilinearOptions",
+                        "builtin_options": {
+                        },
+                        "custom_options_format": "FLEXBUFFERS"
+                    }
+                ],
+              } ],
+              "buffers" : [
+                  { "data": )" + sizeData + R"(, },
+                  { },
+                  { },
+              ]
+            }
+      )";
+      Setup();
+    }
+};
+
+
+struct SimpleResizeBilinearFixture : ResizeBilinearFixture
+{
+    SimpleResizeBilinearFixture()
+        : ResizeBilinearFixture("[ 1, 3, 3, 1 ]",         // inputShape
+                                "[ 1, 5, 5, 1 ]",         // outputShape
+                                "[ 2 ]",                  // sizeShape
+                                "[  5,0,0,0, 5,0,0,0 ]")  // sizeData: raw little-endian bytes for two int32 values {5, 5}
+    {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, SimpleResizeBilinearFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+                0,
+                {{"InputTensor", { 0.0f, 1.0f, 2.0f,
+                                   3.0f, 4.0f, 5.0f,
+                                   6.0f, 7.0f, 8.0f }}},
+                {{"OutputTensor", { 0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
+                                    1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
+                                    3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
+                                    5.4f, 6.0f, 6.6f, 7.2f, 7.4f,
+                                    6.0f, 6.6f, 7.2f, 7.8f, 8.0f }}}
+                );
+}
+
+BOOST_AUTO_TEST_SUITE_END()