IVGCVSW-1806 Restored the fp16 flag that was left behind during the previous
refactoring
* Split getCapabilities and getCapabilities_1_1 as it was before
* Setting relaxedFloat32toFloat16Performance when using HAL 1.1 as
required by one of the VTS tests
Change-Id: Iff883b8cbd0511596e9848fa40e91e4fa58d4260
diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index 18e2596..9761311 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -8,6 +8,7 @@
#include <HalInterfaces.h>
#include "ArmnnDevice.hpp"
+#include "ArmnnDriverImpl.hpp"
#include "../ArmnnDriverImpl.hpp"
#include <log/log.h>
@@ -33,8 +34,8 @@
{
ALOGV("V1_0::ArmnnDriver::getCapabilities()");
- return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getCapabilities(m_Runtime,
- cb);
+ return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime,
+ cb);
}
Return<void> getSupportedOperations(
diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp
new file mode 100644
index 0000000..925d9db
--- /dev/null
+++ b/1.0/ArmnnDriverImpl.cpp
@@ -0,0 +1,68 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "ArmnnDriverImpl.hpp"
+#include "../SystemPropertiesUtils.hpp"
+
+#include <log/log.h>
+
+using namespace std;
+using namespace android;
+using namespace android::nn;
+using namespace android::hardware;
+
+namespace
+{
+
+const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+namespace V1_0
+{
+
+Return<void> ArmnnDriverImpl::getCapabilities(
+ const armnn::IRuntimePtr& runtime,
+ neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+{
+ ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()");
+
+ neuralnetworks::V1_0::Capabilities capabilities;
+ if (runtime)
+ {
+ capabilities.float32Performance.execTime =
+ ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);
+
+ capabilities.float32Performance.powerUsage =
+ ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);
+
+ capabilities.quantized8Performance.execTime =
+ ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);
+
+ capabilities.quantized8Performance.powerUsage =
+ ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
+
+ cb(ErrorStatus::NONE, capabilities);
+ }
+ else
+ {
+ capabilities.float32Performance.execTime = 0;
+ capabilities.float32Performance.powerUsage = 0;
+ capabilities.quantized8Performance.execTime = 0;
+ capabilities.quantized8Performance.powerUsage = 0;
+
+ cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+ }
+
+ return Void();
+}
+
+} // namespace armnn_driver::V1_0
+} // namespace armnn_driver
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
new file mode 100644
index 0000000..b44350d
--- /dev/null
+++ b/1.0/ArmnnDriverImpl.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "../DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn_driver
+{
+namespace V1_0
+{
+
+class ArmnnDriverImpl
+{
+public:
+ static Return<void> getCapabilities(
+ const armnn::IRuntimePtr& runtime,
+ ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb);
+};
+
+} // namespace armnn_driver::V1_0
+} // namespace armnn_driver
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index f55aad4..079d9cd 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -8,7 +8,9 @@
#include <HalInterfaces.h>
#include "ArmnnDevice.hpp"
+#include "ArmnnDriverImpl.hpp"
#include "../ArmnnDriverImpl.hpp"
+#include "../1.0/ArmnnDriverImpl.hpp"
#include <log/log.h>
@@ -33,8 +35,8 @@
{
ALOGV("V1_1::ArmnnDriver::getCapabilities()");
- return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getCapabilities(m_Runtime,
- cb);
+ return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime,
+ cb);
}
Return<void> getSupportedOperations(
@@ -67,8 +69,8 @@
{
ALOGV("V1_1::ArmnnDriver::getCapabilities_1_1()");
- return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getCapabilities(m_Runtime,
- cb);
+ return V1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime,
+ cb);
}
Return<void> getSupportedOperations_1_1(
@@ -103,7 +105,9 @@
m_ClTunedParameters,
m_Options,
model,
- cb);
+ cb,
+ model.relaxComputationFloat32toFloat16
+ && m_Options.GetFp16Enabled());
}
Return<DeviceStatus> getStatus() override
diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp
new file mode 100644
index 0000000..1d063cb
--- /dev/null
+++ b/1.1/ArmnnDriverImpl.cpp
@@ -0,0 +1,73 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "ArmnnDriverImpl.hpp"
+#include "../SystemPropertiesUtils.hpp"
+
+#include <log/log.h>
+
+using namespace std;
+using namespace android;
+using namespace android::nn;
+using namespace android::hardware;
+
+namespace
+{
+
+const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+namespace V1_1
+{
+
+Return<void> ArmnnDriverImpl::getCapabilities_1_1(
+ const armnn::IRuntimePtr& runtime,
+ neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb)
+{
+    ALOGV("V1_1::ArmnnDriverImpl::getCapabilities_1_1()");
+
+ neuralnetworks::V1_1::Capabilities capabilities;
+ if (runtime)
+ {
+ capabilities.float32Performance.execTime =
+ ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);
+
+ capabilities.float32Performance.powerUsage =
+ ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);
+
+ capabilities.quantized8Performance.execTime =
+ ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);
+
+ capabilities.quantized8Performance.powerUsage =
+ ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
+
+ capabilities.relaxedFloat32toFloat16Performance.execTime =
+ ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f);
+
+ cb(ErrorStatus::NONE, capabilities);
+ }
+ else
+ {
+ capabilities.float32Performance.execTime = 0;
+ capabilities.float32Performance.powerUsage = 0;
+ capabilities.quantized8Performance.execTime = 0;
+ capabilities.quantized8Performance.powerUsage = 0;
+ capabilities.relaxedFloat32toFloat16Performance.execTime = 0;
+
+ cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+ }
+
+ return Void();
+}
+
+} // namespace armnn_driver::V1_1
+} // namespace armnn_driver
diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp
new file mode 100644
index 0000000..c309b69
--- /dev/null
+++ b/1.1/ArmnnDriverImpl.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "../DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn_driver
+{
+namespace V1_1
+{
+
+class ArmnnDriverImpl
+{
+public:
+ static Return<void> getCapabilities_1_1(
+ const armnn::IRuntimePtr& runtime,
+ ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb);
+};
+
+} // namespace armnn_driver::V1_1
+} // namespace armnn_driver
diff --git a/Android.mk b/Android.mk
index 3845070..1fd7449 100644
--- a/Android.mk
+++ b/Android.mk
@@ -47,6 +47,7 @@
endif # ARMNN_DRIVER_DEBUG == 1
LOCAL_SRC_FILES := \
+ 1.0/ArmnnDriverImpl.cpp \
ArmnnDriverImpl.cpp \
DriverOptions.cpp \
ArmnnDevice.cpp \
@@ -118,6 +119,8 @@
endif # ARMNN_DRIVER_DEBUG == 1
LOCAL_SRC_FILES := \
+ 1.0/ArmnnDriverImpl.cpp \
+ 1.1/ArmnnDriverImpl.cpp \
ArmnnDriverImpl.cpp \
DriverOptions.cpp \
ArmnnDevice.cpp \
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 0298f3b..ce66e6d 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -26,11 +26,6 @@
namespace
{
-const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
-const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
-
void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
ErrorStatus errorStatus,
const sp<IPreparedModel>& preparedModelPtr)
@@ -59,43 +54,6 @@
{
template <typename HalVersion>
-Return<void> ArmnnDriverImpl<HalVersion>::getCapabilities(
- const armnn::IRuntimePtr& runtime,
- HalGetCapabilities_cb cb)
-{
- ALOGV("ArmnnDriverImpl::getCapabilities()");
-
- HalCapabilities capabilities;
- if (runtime)
- {
- capabilities.float32Performance.execTime =
- ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);
-
- capabilities.float32Performance.powerUsage =
- ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);
-
- capabilities.quantized8Performance.execTime =
- ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);
-
- capabilities.quantized8Performance.powerUsage =
- ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
-
- cb(ErrorStatus::NONE, capabilities);
- }
- else
- {
- capabilities.float32Performance.execTime = 0;
- capabilities.float32Performance.powerUsage = 0;
- capabilities.quantized8Performance.execTime = 0;
- capabilities.quantized8Performance.powerUsage = 0;
-
- cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
- }
-
- return Void();
-}
-
-template <typename HalVersion>
Return<void> ArmnnDriverImpl<HalVersion>::getSupportedOperations(
const armnn::IRuntimePtr& runtime,
const DriverOptions& options,
@@ -281,7 +239,7 @@
// Class template specializations
template class ArmnnDriverImpl<HalVersion_1_0>;
-#ifdef ARMNN_ANDROID_NN_V1_1
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
template class ArmnnDriverImpl<HalVersion_1_1>;
#endif
diff --git a/ArmnnDriverImpl.hpp b/ArmnnDriverImpl.hpp
index c060097..87da581 100644
--- a/ArmnnDriverImpl.hpp
+++ b/ArmnnDriverImpl.hpp
@@ -17,17 +17,13 @@
struct HalVersion_1_0
{
using Model = ::android::hardware::neuralnetworks::V1_0::Model;
- using Capabilities = ::android::hardware::neuralnetworks::V1_0::Capabilities;
- using getCapabilities_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb;
using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb;
};
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
struct HalVersion_1_1
{
using Model = ::android::hardware::neuralnetworks::V1_1::Model;
- using Capabilities = ::android::hardware::neuralnetworks::V1_1::Capabilities;
- using getCapabilities_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb;
using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb;
};
#endif
@@ -37,13 +33,8 @@
{
public:
using HalModel = typename HalVersion::Model;
- using HalCapabilities = typename HalVersion::Capabilities;
- using HalGetCapabilities_cb = typename HalVersion::getCapabilities_cb;
using HalGetSupportedOperations_cb = typename HalVersion::getSupportedOperations_cb;
- static Return<void> getCapabilities(
- const armnn::IRuntimePtr& runtime,
- HalGetCapabilities_cb cb);
static Return<void> getSupportedOperations(
const armnn::IRuntimePtr& runtime,
const DriverOptions& options,
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 6db32a0..6ae1cb5 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -653,7 +653,7 @@
}
}
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_1::Operation& operation)
{
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index c28ebdc..5ee6a34 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -56,7 +56,7 @@
private:
void Convert();
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
bool ConvertDiv(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
@@ -205,4 +205,4 @@
std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
-} // armnn_driver
\ No newline at end of file
+} // armnn_driver
diff --git a/RequestThread.cpp b/RequestThread.cpp
index 8e44d8d..c5c9bbf 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -134,7 +134,7 @@
// Class template specializations
template class RequestThread<HalVersion_1_0>;
-#ifdef ARMNN_ANDROID_NN_V1_1 // Using ::android::hardware::neuralnetworks::V1_1.
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
template class RequestThread<HalVersion_1_1>;
#endif