Implement dynamic fusion softmax operator
- Return the aux TensorInfo from get_auxiliary_tensors() at runtime so the aux
tensor can be initialized with the right size.
- Keep softmax unfusable for this commit
- Hence, added Tensor3D to the template writer arguments declaration, for the
sake of keeping the dynamic fusion softmax components' kernels matching their
CL counterparts.
Resolves: COMPMID-5523
Change-Id: I667f39545db925f667036ef448302c79a0330373
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/483924
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: bsgcomp <bsgcomp@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8986
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
index 022d468..b3ec393 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
@@ -41,7 +41,7 @@
// Create kernel from kernel source string
opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
_kernel = static_cast<cl::Kernel>(compile_ctx.create_kernel(code.name(),
- "" /* Program name: Used to as part of a unique string for built kernel cache. Not needed */,
+ code.name(), // program name has to be provided to differentiate between different unfusable components' kernels.
code.code(),
klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */,
code.build_options().options(),
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
index 7e427fe..cd21b10 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -355,12 +355,12 @@
return Status{};
}
-std::vector<std::pair<CLTensor *, AuxMemoryInfo>> ClWorkloadRuntime::get_auxiliary_tensors()
+std::vector<std::tuple<CLTensor *, TensorInfo, AuxMemoryInfo>> ClWorkloadRuntime::get_auxiliary_tensors()
{
- std::vector<std::pair<CLTensor *, AuxMemoryInfo>> aux_tensors;
+ std::vector<std::tuple<CLTensor *, TensorInfo, AuxMemoryInfo>> aux_tensors;
for(const auto &data : _impl->_aux_tensors.get_tensors())
{
- aux_tensors.emplace_back(data.tensor, data.memory_info);
+ aux_tensors.emplace_back(data.tensor, data.tensor_info, data.memory_info);
}
return aux_tensors;
}