IVGCVSW-1713 Create a minimal unit test to compare the results
before and after passing the FP16 flag in the Android-nn-driver
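
The fp16 flag maps onto the V1_1 model field
relaxComputationFloat32toFloat16, so the test is built for both HAL 1.0
(where the flag is a no-op) and HAL 1.1. The shared PaddingTestImpl runs
a CONV_2D over input data containing 1024.25f, a value that is not
exactly representable in IEEE fp16 (the fp16 ulp at 1024 is 1.0), so the
valid-padding output accumulates to

    fp32: 1024.25 - 1.0 - 1.0 = 1022.25
    fp16: 1024.00 - 1.0 - 1.0 = 1022.00

and comparing the first output element against 1022.25f (flag off) and
1022.f (flag on) shows whether the relaxation took effect.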

Change-Id: If8d4ca12421c3bee2526eec98f11d393af822373
diff --git a/test/1.0/Convolution2D.cpp b/test/1.0/Convolution2D.cpp
new file mode 100644
index 0000000..9a5d239
--- /dev/null
+++ b/test/1.0/Convolution2D.cpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../DriverTestHelpers.hpp"
+#include "../Convolution2D.hpp"
+#include "../../1.0/HalPolicy.hpp"
+
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+#include <OperationsUtils.h>
+
+BOOST_AUTO_TEST_SUITE(Convolution2DTests)
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+namespace driverTestHelpers
+{
+
+void SetModelFp16Flag(V1_0::Model&, bool)
+{
+    // Nothing to do: V1_0::Model does not support fp16 precision relaxation,
+    // so this overload is a no-op provided only for template compatibility.
+}
+
+} // namespace driverTestHelpers
+
+BOOST_AUTO_TEST_CASE(ConvValidPadding_Hal_1_0)
+{
+    PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingValid);
+}
+
+BOOST_AUTO_TEST_CASE(ConvSamePadding_Hal_1_0)
+{
+    PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingSame);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/1.1/Convolution2D.cpp b/test/1.1/Convolution2D.cpp
new file mode 100644
index 0000000..32d5018
--- /dev/null
+++ b/test/1.1/Convolution2D.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../DriverTestHelpers.hpp"
+#include "../Convolution2D.hpp"
+#include "../../1.1/HalPolicy.hpp"
+
+#include <boost/test/unit_test.hpp>
+#include <log/log.h>
+
+#include <OperationsUtils.h>
+
+BOOST_AUTO_TEST_SUITE(Convolution2DTests)
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+namespace driverTestHelpers
+{
+
+void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled)
+{
+    // Set the fp16 flag in the given model
+    model.relaxComputationFloat32toFloat16 = fp16Enabled;
+}
+
+} // namespace driverTestHelpers
+
+BOOST_AUTO_TEST_CASE(ConvValidPadding_Hal_1_1)
+{
+    PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid);
+}
+
+BOOST_AUTO_TEST_CASE(ConvSamePadding_Hal_1_1)
+{
+    PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame);
+}
+
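+// The Fp16Flag variants below enable relaxComputationFloat32toFloat16 and
+// expect the fp16-rounded results checked in PaddingTestImpl.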
+BOOST_AUTO_TEST_CASE(ConvValidPaddingFp16Flag_Hal_1_1)
+{
+    PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid, true);
+}
+
+BOOST_AUTO_TEST_CASE(ConvSamePaddingFp16Flag_Hal_1_1)
+{
+    PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame, true);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/Android.mk b/test/Android.mk
index cab874c..c38b92e 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -44,10 +44,10 @@
 endif # PLATFORM_VERSION == 9
 
 LOCAL_SRC_FILES := \
+        1.0/Convolution2D.cpp \
         Tests.cpp \
         UtilsTests.cpp \
         Concurrent.cpp \
-        Convolution2D.cpp \
         FullyConnected.cpp \
         GenericLayerTests.cpp \
         DriverTestHelpers.cpp \
@@ -118,10 +118,11 @@
         -DARMNN_ANDROID_NN_V1_1
 
 LOCAL_SRC_FILES := \
+        1.0/Convolution2D.cpp \
+        1.1/Convolution2D.cpp \
         Tests.cpp \
         UtilsTests.cpp \
         Concurrent.cpp \
-        Convolution2D.cpp \
         FullyConnected.cpp \
         GenericLayerTests.cpp \
         DriverTestHelpers.cpp \
diff --git a/test/Convolution2D.cpp b/test/Convolution2D.hpp
similarity index 61%
rename from test/Convolution2D.cpp
rename to test/Convolution2D.hpp
index 3f097b8..ff417d9 100644
--- a/test/Convolution2D.cpp
+++ b/test/Convolution2D.hpp
@@ -2,7 +2,11 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
 #include "DriverTestHelpers.hpp"
+
 #include <boost/test/unit_test.hpp>
 #include <log/log.h>
 
@@ -14,19 +18,29 @@
 using namespace driverTestHelpers;
 using namespace armnn_driver;
 
-namespace
+namespace driverTestHelpers
 {
 
-void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)
+void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);
+
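+// The V1_0 overload above is a no-op, since a V1_0::Model cannot request fp16
+// relaxation; the V1_1 overload below toggles relaxComputationFloat32toFloat16.
+// Together they let PaddingTestImpl remain a single template for both HAL
+// versions.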
+#ifdef ARMNN_ANDROID_NN_V1_1
+void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled);
+#endif
+
+template<typename HalPolicy>
+void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled = false)
 {
-    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-    neuralnetworks::V1_0::Model model  = {};
+    using HalModel         = typename HalPolicy::Model;
+    using HalOperationType = typename HalPolicy::OperationType;
+
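+    // Create the driver with the requested fp16 setting; when fp16Enabled is
+    // true, the expected results below reflect the reduced precision.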
+    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc, fp16Enabled));
+    HalModel model = {};
 
     uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1;
 
     // add operands
-    float weightValue[] = {1, -1, 0, 1};
-    float biasValue[]   = {0};
+    float weightValue[] = {1.f, -1.f, 0.f, 1.f};
+    float biasValue[]   = {0.f};
 
     AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 3, 1});
     AddTensorOperand(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
@@ -39,11 +53,12 @@
 
     // make the convolution operation
     model.operations.resize(1);
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::CONV_2D;
+    model.operations[0].type = HalOperationType::CONV_2D;
     model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
     model.operations[0].outputs = hidl_vec<uint32_t>{7};
 
     // make the prepared model
+    SetModelFp16Flag(model, fp16Enabled);
     android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
 
     // construct the request
@@ -67,44 +82,48 @@
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
-
-    // set the input data (matching source test)
+    // set the input data
-    float indata[] = {4, 1, 0, 3, -1, 2};
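+    // 1024.25f is deliberately not representable in fp16 (the fp16 ulp at
+    // 1024 is 1.0), so enabling fp16 relaxation changes the result.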
+    float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1.f, -1024.25f};
     AddPoolAndSetData(6, request, indata);
 
     // add memory for the output
     android::sp<IMemory> outMemory = AddPoolAndGetData(outSize, request);
-    float*               outdata   = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+    float* outdata = reinterpret_cast<float*>(static_cast<void*>(outMemory->getPointer()));
 
     // run the execution
     Execute(preparedModel, request);
 
     // check the result
-    if (paddingScheme == android::nn::kPaddingValid)
+    switch (paddingScheme)
     {
-        BOOST_TEST(outdata[0] == 2);
-    }
-    else if (paddingScheme == android::nn::kPaddingSame)
-    {
-        BOOST_TEST(outdata[0] == 2);
-        BOOST_TEST(outdata[1] == 0);
-    }
-    else
-    {
+    case android::nn::kPaddingValid:
+        if (fp16Enabled)
+        {
+            BOOST_TEST(outdata[0] == 1022.f);
+        }
+        else
+        {
+            BOOST_TEST(outdata[0] == 1022.25f);
+        }
+        break;
+    case android::nn::kPaddingSame:
+        if (fp16Enabled)
+        {
+            BOOST_TEST(outdata[0] == 1022.f);
+            BOOST_TEST(outdata[1] == 0.f);
+        }
+        else
+        {
+            BOOST_TEST(outdata[0] == 1022.25f);
+            BOOST_TEST(outdata[1] == 0.f);
+        }
+        break;
+    default:
         BOOST_TEST(false);
+        break;
     }
 }
 
-} // namespace <anonymous>
-
-BOOST_AUTO_TEST_CASE(ConvValidPadding)
-{
-    PaddingTestImpl(android::nn::kPaddingValid);
-}
-
-BOOST_AUTO_TEST_CASE(ConvSamePadding)
-{
-    PaddingTestImpl(android::nn::kPaddingSame);
-}
+} // namespace driverTestHelpers
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 1115491..ded2459 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -109,68 +109,11 @@
     memcpy(dst, data, size * sizeof(float));
 }
 
-void AddOperand(neuralnetworks::V1_0::Model& model, const Operand& op)
-{
-    model.operands.resize(model.operands.size() + 1);
-    model.operands[model.operands.size() - 1] = op;
-}
-
-void AddIntOperand(neuralnetworks::V1_0::Model& model, int32_t value)
-{
-    DataLocation location = {};
-    location.offset = model.operandValues.size();
-    location.length = sizeof(int32_t);
-
-    Operand op    = {};
-    op.type = OperandType::INT32;
-    op.dimensions = hidl_vec<uint32_t>{};
-    op.lifetime   = OperandLifeTime::CONSTANT_COPY;
-    op.location   = location;
-
-    model.operandValues.resize(model.operandValues.size() + location.length);
-    *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
-
-    AddOperand(model, op);
-}
-
-void AddInputOperand(neuralnetworks::V1_0::Model& model,
-                     hidl_vec<uint32_t> dimensions,
-                     neuralnetworks::V1_0::OperandType operandType)
-{
-    Operand op    = {};
-    op.type       = operandType;
-    op.dimensions = dimensions;
-    op.lifetime   = OperandLifeTime::MODEL_INPUT;
-
-    AddOperand(model, op);
-
-    model.inputIndexes.resize(model.inputIndexes.size() + 1);
-    model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
-}
-
-void AddOutputOperand(neuralnetworks::V1_0::Model& model,
-                      hidl_vec<uint32_t> dimensions,
-                      neuralnetworks::V1_0::OperandType operandType)
-{
-    Operand op = {};
-    op.type       = operandType;
-    op.scale      = operandType == neuralnetworks::V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
-    op.dimensions = dimensions;
-    op.lifetime   = OperandLifeTime::MODEL_OUTPUT;
-
-    AddOperand(model, op);
-
-    model.outputIndexes.resize(model.outputIndexes.size() + 1);
-    model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
-}
-
-
 android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_0::Model& model,
                                                    armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus & prepareStatus,
+                                                   ErrorStatus& prepareStatus,
                                                    ErrorStatus expectedStatus)
 {
-
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel(model, cb);
 
@@ -183,13 +126,27 @@
     return cb->GetPreparedModel();
 }
 
-android::sp<IPreparedModel> PrepareModel(const neuralnetworks::V1_0::Model& model,
-                                         armnn_driver::ArmnnDriver& driver)
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+
+android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_1::Model& model,
+                                                   armnn_driver::ArmnnDriver& driver,
+                                                   ErrorStatus& prepareStatus,
+                                                   ErrorStatus expectedStatus)
 {
-    ErrorStatus prepareStatus = ErrorStatus::NONE;
-    return PrepareModelWithStatus(model, driver, prepareStatus);
+    android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
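+
+    // Unlike the V1_0 entry point, prepareModel_1_1 also takes an
+    // ExecutionPreference; LOW_POWER is used as a fixed value for the tests.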
+    driver.prepareModel_1_1(model, neuralnetworks::V1_1::ExecutionPreference::LOW_POWER, cb);
+
+    prepareStatus = cb->GetErrorStatus();
+    BOOST_TEST(prepareStatus == expectedStatus);
+    if (expectedStatus == ErrorStatus::NONE)
+    {
+        BOOST_TEST((cb->GetPreparedModel() != nullptr));
+    }
+    return cb->GetPreparedModel();
 }
 
+#endif
+
 ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
                     const Request& request,
                     ErrorStatus expectedStatus)
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 03dbeb9..ce09ee6 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -10,6 +10,7 @@
 
 #include "../ArmnnDriver.hpp"
 #include <iosfwd>
+#include <boost/test/unit_test.hpp>
 
 namespace android
 {
@@ -72,9 +73,31 @@
 
 void AddPoolAndSetData(uint32_t size, Request& request, const float* data);
 
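+// The helpers below are templated on the HAL model type so that they work
+// with both V1_0::Model and V1_1::Model, which share these fields.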
-void AddOperand(::android::hardware::neuralnetworks::V1_0::Model& model, const Operand& op);
+template<typename HalModel>
+void AddOperand(HalModel& model, const Operand& op)
+{
+    model.operands.resize(model.operands.size() + 1);
+    model.operands[model.operands.size() - 1] = op;
+}
 
-void AddIntOperand(::android::hardware::neuralnetworks::V1_0::Model& model, int32_t value);
+template<typename HalModel>
+void AddIntOperand(HalModel& model, int32_t value)
+{
+    DataLocation location = {};
+    location.offset = model.operandValues.size();
+    location.length = sizeof(int32_t);
+
+    Operand op    = {};
+    op.type       = OperandType::INT32;
+    op.dimensions = hidl_vec<uint32_t>{};
+    op.lifetime   = OperandLifeTime::CONSTANT_COPY;
+    op.location   = location;
+
+    model.operandValues.resize(model.operandValues.size() + location.length);
+    *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
+
+    AddOperand<HalModel>(model, op);
+}
 
 template<typename T>
 OperandType TypeToOperandType();
@@ -85,8 +108,8 @@
 template<>
 OperandType TypeToOperandType<int32_t>();
 
-template<typename T>
-void AddTensorOperand(::android::hardware::neuralnetworks::V1_0::Model& model,
+template<typename HalModel, typename T>
+void AddTensorOperand(HalModel& model,
                       hidl_vec<uint32_t> dimensions,
                       T* values,
                       OperandType operandType = OperandType::TENSOR_FLOAT32)
@@ -113,28 +136,67 @@
         *(reinterpret_cast<T*>(&model.operandValues[location.offset]) + i) = values[i];
     }
 
-    AddOperand(model, op);
+    AddOperand<HalModel>(model, op);
 }
 
-void AddInputOperand(::android::hardware::neuralnetworks::V1_0::Model& model,
+template<typename HalModel>
+void AddInputOperand(HalModel& model,
                      hidl_vec<uint32_t> dimensions,
-                     ::android::hardware::neuralnetworks::V1_0::OperandType operandType = OperandType::TENSOR_FLOAT32);
+                     OperandType operandType = OperandType::TENSOR_FLOAT32)
+{
+    Operand op    = {};
+    op.type       = operandType;
+    op.dimensions = dimensions;
+    op.lifetime   = OperandLifeTime::MODEL_INPUT;
 
-void AddOutputOperand(::android::hardware::neuralnetworks::V1_0::Model& model,
+    AddOperand<HalModel>(model, op);
+
+    model.inputIndexes.resize(model.inputIndexes.size() + 1);
+    model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
+}
+
+template<typename HalModel>
+void AddOutputOperand(HalModel& model,
                       hidl_vec<uint32_t> dimensions,
-                      ::android::hardware::neuralnetworks::V1_0::OperandType operandType = OperandType::TENSOR_FLOAT32);
+                      OperandType operandType = OperandType::TENSOR_FLOAT32)
+{
+    Operand op    = {};
+    op.type       = operandType;
+    op.scale      = operandType == OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
+    op.dimensions = dimensions;
+    op.lifetime   = OperandLifeTime::MODEL_OUTPUT;
 
-android::sp<IPreparedModel> PrepareModel(const ::android::hardware::neuralnetworks::V1_0::Model& model,
-                                         armnn_driver::ArmnnDriver& driver);
+    AddOperand<HalModel>(model, op);
+
+    model.outputIndexes.resize(model.outputIndexes.size() + 1);
+    model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
+}
 
 android::sp<IPreparedModel> PrepareModelWithStatus(const ::android::hardware::neuralnetworks::V1_0::Model& model,
                                                    armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus & prepareStatus,
-                                                   ErrorStatus expectedStatus=ErrorStatus::NONE);
+                                                   ErrorStatus& prepareStatus,
+                                                   ErrorStatus expectedStatus = ErrorStatus::NONE);
+
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+
+android::sp<IPreparedModel> PrepareModelWithStatus(const ::android::hardware::neuralnetworks::V1_1::Model& model,
+                                                   armnn_driver::ArmnnDriver& driver,
+                                                   ErrorStatus& prepareStatus,
+                                                   ErrorStatus expectedStatus = ErrorStatus::NONE);
+
+#endif
+
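+// Overload resolution on PrepareModelWithStatus selects the V1_0 or V1_1
+// implementation to match the given HalModel type.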
+template<typename HalModel>
+android::sp<IPreparedModel> PrepareModel(const HalModel& model,
+                                         armnn_driver::ArmnnDriver& driver)
+{
+    ErrorStatus prepareStatus = ErrorStatus::NONE;
+    return PrepareModelWithStatus(model, driver, prepareStatus);
+}
 
 ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
                     const Request& request,
-                    ErrorStatus expectedStatus=ErrorStatus::NONE);
+                    ErrorStatus expectedStatus = ErrorStatus::NONE);
 
 android::sp<ExecutionCallback> ExecuteNoWait(android::sp<IPreparedModel> preparedModel,
                                              const Request& request);
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index 85e58d0..5c204ca 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -19,7 +19,7 @@
     // but that uses slightly weird dimensions which I don't think we need to support for now
 
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};
 
     // add operands
     int32_t actValue      = 0;
@@ -34,7 +34,7 @@
 
     // make the fully connected operation
     model.operations.resize(1);
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};
 
@@ -90,7 +90,7 @@
             sup = supported;
         };
 
-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};
 
     // operands
     int32_t actValue      = 0;
@@ -113,7 +113,7 @@
 
     model.operations.resize(1);
 
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs  = hidl_vec<uint32_t>{0,1,2,3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};
 
@@ -177,7 +177,7 @@
             sup = supported;
         };
 
-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};
 
     // operands
     int32_t actValue      = 0;
@@ -200,7 +200,7 @@
 
     model.operations.resize(1);
 
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs  = hidl_vec<uint32_t>{0,1,2,3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};
 
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index fd58a58..c66854f 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -216,7 +216,7 @@
 // during mem pool mapping we properly report an error to the framework via a callback
 BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
 {
-    auto driver = std::make_unique<ArmnnDriver>(armnn::Compute::CpuRef);
+    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
     ErrorStatus errorStatus;
     std::vector<bool> supported;