IVGCVSW-7624 GpuFsa Op: Add Softmax operator

* Added Softmax operator support to the GpuFsa backend (layer-support validation)
* Added test cases for GpuFsa Softmax layer support

Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: I51d530b110c4cb812f5aab31ad1ee4022d81d19e
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 85fb03a..98fb430 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -17,6 +17,7 @@
 #include "layers/GpuFsaElementwiseBinary.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
 #include "layers/GpuFsaResize.hpp"
+#include "layers/GpuFsaSoftmax.hpp"
 #endif
 
 #include <vector>
@@ -206,6 +207,21 @@
                                         infos[0],
                                         *desc);
         }
+        case LayerType::Softmax:
+        {
+            if (infos.size() != 2)
+            {
+                throw InvalidArgumentException("Invalid number of Softmax TensorInfos. "
+                                               "TensorInfos should be of format: {input, output}.");
+            }
+
+            auto desc = PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor);
+            FORWARD_LAYER_VALIDATE_FUNC(GpuFsaSoftmaxValidate,
+                                        reasonIfUnsupported,
+                                        infos[0],
+                                        infos[1],
+                                        *desc);
+        }
         case LayerType::Constant:
         case LayerType::Input:
         case LayerType::Output: