COMPMID-1539 Implement YOLOLayer on CL

Change-Id: I332c0703e1399fca0c5b724529b54a28f49c88da
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146842
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 9455eff..9750ea9 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -46,46 +46,7 @@
 
     for(int i = 0; i < src.num_elements(); ++i)
     {
-        T x = src[i];
-
-        switch(info.activation())
-        {
-            case ActivationLayerInfo::ActivationFunction::ABS:
-                dst[i] = std::abs(x);
-                break;
-            case ActivationLayerInfo::ActivationFunction::LINEAR:
-                dst[i] = a * x + b;
-                break;
-            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-                dst[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::RELU:
-                dst[i] = std::max<T>(static_cast<T>(0), x);
-                break;
-            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
-                dst[i] = std::min<T>(a, std::max(static_cast<T>(0), x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
-                dst[i] = std::min<T>(a, std::max<T>(b, x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-                dst[i] = (x > 0) ? x : a * x;
-                break;
-            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
-                dst[i] = std::log(static_cast<T>(1) + std::exp(x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQRT:
-                dst[i] = std::sqrt(x);
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQUARE:
-                dst[i] = x * x;
-                break;
-            case ActivationLayerInfo::ActivationFunction::TANH:
-                dst[i] = a * std::tanh(b * x);
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Unsupported activation function");
-        }
+        dst[i] = activate_float<T>(src[i], a, b, info.activation());
     }
 
     return dst;
diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index 09f602f..c752e74 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,6 +35,65 @@
 {
 namespace reference
 {
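+/** Apply a floating-point activation function to a single value
+ *
+ * Scalar helper shared by the activation and YOLO layer references.
+ *
+ * @param[in] x          Input value
+ * @param[in] a          Alpha parameter of the activation
+ * @param[in] b          Beta parameter of the activation
+ * @param[in] activation Activation function to apply
+ *
+ * @return The activated value
+ */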
+template <typename T>
+inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction activation)
+{
+    T ret;
+
+    switch(activation)
+    {
+        case ActivationLayerInfo::ActivationFunction::ABS:
+            ret = std::abs(x);
+            break;
+        case ActivationLayerInfo::ActivationFunction::LINEAR:
+            ret = a * x + b;
+            break;
+        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+            ret = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
+            break;
+        case ActivationLayerInfo::ActivationFunction::RELU:
+            ret = std::max<T>(static_cast<T>(0), x);
+            break;
+        case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+            ret = std::min<T>(a, std::max<T>(static_cast<T>(0), x));
+            break;
+        case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
+            ret = std::min<T>(a, std::max<T>(b, x));
+            break;
+        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+            ret = (x > 0) ? x : a * x;
+            break;
+        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+            ret = std::log(static_cast<T>(1) + std::exp(x));
+            break;
+        case ActivationLayerInfo::ActivationFunction::SQRT:
+            ret = std::sqrt(x);
+            break;
+        case ActivationLayerInfo::ActivationFunction::SQUARE:
+            ret = x * x;
+            break;
+        case ActivationLayerInfo::ActivationFunction::TANH:
+            ret = a * std::tanh(b * x);
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Unsupported activation function");
+            break;
+    }
+
+    return ret;
+}
+
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
 SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info);
 
diff --git a/tests/validation/reference/YOLOLayer.cpp b/tests/validation/reference/YOLOLayer.cpp
new file mode 100644
index 0000000..a12f411
--- /dev/null
+++ b/tests/validation/reference/YOLOLayer.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "YOLOLayer.h"
+
+#include "ActivationLayer.h"
+
+#include "arm_compute/core/Types.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> yolo_layer(const SimpleTensor<T> &src, const ActivationLayerInfo &info, int32_t num_classes)
+{
+    // Create reference
+    SimpleTensor<T> dst{ src.shape(), src.data_type() };
+
+    // Compute reference
+    const T a(info.a());
+    const T b(info.b());
+
+    for(int i = 0; i < src.num_elements(); ++i)
+    {
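+        // Each box spans (num_classes + 5) channels along z: x, y, w, h,
+        // objectness score, then the per-class scores. The activation is
+        // applied to every channel except the box width and height (z == 2, z == 3).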
+        const size_t z = index2coord(dst.shape(), i).z() % (num_classes + 5);
+
+        if(z != 2 && z != 3)
+        {
+            dst[i] = activate_float<T>(src[i], a, b, info.activation());
+        }
+        else
+        {
+            dst[i] = src[i];
+        }
+    }
+
+    return dst;
+}
+
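+// QASYMM8 path: dequantize to float, run the float reference,
+// then requantize using the input's quantization info.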
+template <>
+SimpleTensor<uint8_t> yolo_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const ActivationLayerInfo &info, int32_t num_classes)
+{
+    SimpleTensor<float>   src_tmp = convert_from_asymmetric(src);
+    SimpleTensor<float>   dst_tmp = yolo_layer<float>(src_tmp, info, num_classes);
+    SimpleTensor<uint8_t> dst     = convert_to_asymmetric(dst_tmp, src.quantization_info());
+    return dst;
+}
+
+template SimpleTensor<float> yolo_layer(const SimpleTensor<float> &src, const ActivationLayerInfo &info, int32_t num_classes);
+template SimpleTensor<half> yolo_layer(const SimpleTensor<half> &src, const ActivationLayerInfo &info, int32_t num_classes);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/YOLOLayer.h b/tests/validation/reference/YOLOLayer.h
new file mode 100644
index 0000000..659f1dd
--- /dev/null
+++ b/tests/validation/reference/YOLOLayer.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_YOLO_LAYER_H__
+#define __ARM_COMPUTE_TEST_YOLO_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
+SimpleTensor<T> yolo_layer(const SimpleTensor<T> &src, const ActivationLayerInfo &info, int32_t num_classes);
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+SimpleTensor<T> yolo_layer(const SimpleTensor<T> &src, const ActivationLayerInfo &info, int32_t num_classes);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_YOLO_LAYER_H__ */