Rename Quantization/Dequantization kernels/operators to imperative mood
Renames the following kernels/functions
- [Cl|Cpu]DequantizationKernel -> [Cl|Cpu]DequantizeKernel
- [Cl|Cpu]Dequantization -> [Cl|Cpu]Dequantize
- [Cl|Cpu]QuantizationKernel -> [Cl|Cpu]QuantizeKernel
- [Cl|Cpu]Quantization -> [Cl|Cpu]Quantize
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ic3c5eb3b7fe28f807294d159830eef99c2dd6219
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5566
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/runtime/CL/functions/CLDequantizationLayer.cpp b/src/runtime/CL/functions/CLDequantizationLayer.cpp
index e0381f9..3b10401 100644
--- a/src/runtime/CL/functions/CLDequantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLDequantizationLayer.cpp
@@ -27,15 +27,15 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/CL/ICLKernel.h"
-#include "src/runtime/gpu/cl/operators/ClDequantization.h"
+#include "src/runtime/gpu/cl/operators/ClDequantize.h"
namespace arm_compute
{
struct CLDequantizationLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<opencl::ClDequantization> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClDequantize> op{ nullptr };
};
CLDequantizationLayer::CLDequantizationLayer()
@@ -54,13 +54,13 @@
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<opencl::ClDequantization>();
+ _impl->op = std::make_unique<opencl::ClDequantize>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLDequantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return opencl::ClDequantization::validate(input, output);
+ return opencl::ClDequantize::validate(input, output);
}
void CLDequantizationLayer::run()
diff --git a/src/runtime/CL/functions/CLQuantizationLayer.cpp b/src/runtime/CL/functions/CLQuantizationLayer.cpp
index 1f6ddb6..e6451b2 100644
--- a/src/runtime/CL/functions/CLQuantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLQuantizationLayer.cpp
@@ -26,15 +26,15 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/ICLKernel.h"
-#include "src/runtime/gpu/cl/operators/ClQuantization.h"
+#include "src/runtime/gpu/cl/operators/ClQuantize.h"
namespace arm_compute
{
struct CLQuantizationLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<opencl::ClQuantization> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClQuantize> op{ nullptr };
};
CLQuantizationLayer::CLQuantizationLayer()
@@ -53,13 +53,13 @@
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<opencl::ClQuantization>();
+ _impl->op = std::make_unique<opencl::ClQuantize>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return opencl::ClQuantization::validate(input, output);
+ return opencl::ClQuantize::validate(input, output);
}
void CLQuantizationLayer::run()
diff --git a/src/runtime/NEON/functions/NEDequantizationLayer.cpp b/src/runtime/NEON/functions/NEDequantizationLayer.cpp
index 210fbe0..91e3759 100644
--- a/src/runtime/NEON/functions/NEDequantizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEDequantizationLayer.cpp
@@ -26,15 +26,15 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/Tensor.h"
-#include "src/runtime/cpu/operators/CpuDequantization.h"
+#include "src/runtime/cpu/operators/CpuDequantize.h"
namespace arm_compute
{
struct NEDequantizationLayer::Impl
{
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- std::unique_ptr<cpu::CpuDequantization> op{ nullptr };
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuDequantize> op{ nullptr };
};
NEDequantizationLayer::NEDequantizationLayer()
@@ -47,13 +47,13 @@
{
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<cpu::CpuDequantization>();
+ _impl->op = std::make_unique<cpu::CpuDequantize>();
_impl->op->configure(input->info(), output->info());
}
Status NEDequantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return cpu::CpuDequantization::validate(input, output);
+ return cpu::CpuDequantize::validate(input, output);
}
void NEDequantizationLayer::run()
diff --git a/src/runtime/NEON/functions/NEQuantizationLayer.cpp b/src/runtime/NEON/functions/NEQuantizationLayer.cpp
index 58ba687..e607917 100644
--- a/src/runtime/NEON/functions/NEQuantizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEQuantizationLayer.cpp
@@ -26,15 +26,15 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/Tensor.h"
-#include "src/runtime/cpu/operators/CpuQuantization.h"
+#include "src/runtime/cpu/operators/CpuQuantize.h"
namespace arm_compute
{
struct NEQuantizationLayer::Impl
{
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- std::unique_ptr<cpu::CpuQuantization> op{ nullptr };
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuQuantize> op{ nullptr };
};
NEQuantizationLayer::NEQuantizationLayer()
@@ -45,14 +45,14 @@
Status NEQuantizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return cpu::CpuQuantization::validate(input, output);
+ return cpu::CpuQuantize::validate(input, output);
}
void NEQuantizationLayer::configure(const ITensor *input, ITensor *output)
{
_impl->src = input;
_impl->dst = output;
- _impl->op = std::make_unique<cpu::CpuQuantization>();
+ _impl->op = std::make_unique<cpu::CpuQuantize>();
_impl->op->configure(input->info(), output->info());
}
diff --git a/src/runtime/cpu/operators/CpuDequantization.cpp b/src/runtime/cpu/operators/CpuDequantize.cpp
similarity index 77%
rename from src/runtime/cpu/operators/CpuDequantization.cpp
rename to src/runtime/cpu/operators/CpuDequantize.cpp
index 0a3f602..80a2e28 100644
--- a/src/runtime/cpu/operators/CpuDequantization.cpp
+++ b/src/runtime/cpu/operators/CpuDequantize.cpp
@@ -21,30 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/cpu/operators/CpuDequantization.h"
+#include "src/runtime/cpu/operators/CpuDequantize.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuDequantizationKernel.h"
+#include "src/core/cpu/kernels/CpuDequantizeKernel.h"
namespace arm_compute
{
namespace cpu
{
-void CpuDequantization::configure(const ITensorInfo *src, ITensorInfo *dst)
+void CpuDequantize::configure(const ITensorInfo *src, ITensorInfo *dst)
{
- auto k = std::make_unique<kernels::CpuDequantizationKernel>();
+ auto k = std::make_unique<kernels::CpuDequantizeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
}
-Status CpuDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- return kernels::CpuDequantizationKernel::validate(src, dst);
+ return kernels::CpuDequantizeKernel::validate(src, dst);
}
-void CpuDequantization::run(ITensorPack &tensors)
+void CpuDequantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
prepare(tensors);
diff --git a/src/runtime/cpu/operators/CpuDequantization.h b/src/runtime/cpu/operators/CpuDequantize.h
similarity index 73%
rename from src/runtime/cpu/operators/CpuDequantization.h
rename to src/runtime/cpu/operators/CpuDequantize.h
index 22f8114..d1fb9e8 100644
--- a/src/runtime/cpu/operators/CpuDequantization.h
+++ b/src/runtime/cpu/operators/CpuDequantize.h
@@ -21,36 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_DEQUANTIZATION_H
-#define ARM_COMPUTE_CPU_DEQUANTIZATION_H
+#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_H
+#define ARM_COMPUTE_CPU_DEQUANTIZE_H
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "src/core/cpu/ICpuKernel.h"
#include "src/runtime/cpu/ICpuOperator.h"
-#include <memory>
-
namespace arm_compute
{
namespace cpu
{
-/** Basic function to run @ref kernels::CpuDequantizationKernel that dequantizes an input tensor */
-class CpuDequantization : public ICpuOperator
+/** Basic function to run @ref kernels::CpuDequantizeKernel that dequantizes an input tensor */
+class CpuDequantize : public ICpuOperator
{
public:
/** Default Constructor */
- CpuDequantization() = default;
+ CpuDequantize() = default;
/** Configure the kernel.
*
* @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantization
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] dst Destination tensor info. Data type supported: F16/F32.
+ * Similar to @ref CpuDequantize::configure()
*
* @return a status
*/
@@ -61,4 +55,4 @@
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_DEQUANTIZATION_H */
+#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_H */
diff --git a/src/runtime/cpu/operators/CpuQuantization.cpp b/src/runtime/cpu/operators/CpuQuantize.cpp
similarity index 78%
rename from src/runtime/cpu/operators/CpuQuantization.cpp
rename to src/runtime/cpu/operators/CpuQuantize.cpp
index ede1385..5af7f63 100644
--- a/src/runtime/cpu/operators/CpuQuantization.cpp
+++ b/src/runtime/cpu/operators/CpuQuantize.cpp
@@ -22,34 +22,34 @@
* SOFTWARE.
*/
-#include "src/runtime/cpu/operators/CpuQuantization.h"
+#include "src/runtime/cpu/operators/CpuQuantize.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/cpu/kernels/CpuQuantizationKernel.h"
+#include "src/core/cpu/kernels/CpuQuantizeKernel.h"
namespace arm_compute
{
namespace cpu
{
-Status CpuQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status CpuQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizationKernel::validate(src, dst));
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuQuantizeKernel::validate(src, dst));
return Status{};
}
-void CpuQuantization::configure(ITensorInfo *src, ITensorInfo *dst)
+void CpuQuantize::configure(const ITensorInfo *src, ITensorInfo *dst)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
// Configure quantize kernel
- auto k = std::make_unique<kernels::CpuQuantizationKernel>();
+ auto k = std::make_unique<kernels::CpuQuantizeKernel>();
k->configure(src, dst);
_kernel = std::move(k);
}
-void CpuQuantization::run(ITensorPack &tensors)
+void CpuQuantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
diff --git a/src/runtime/cpu/operators/CpuQuantization.h b/src/runtime/cpu/operators/CpuQuantize.h
similarity index 69%
rename from src/runtime/cpu/operators/CpuQuantization.h
rename to src/runtime/cpu/operators/CpuQuantize.h
index 97f0c5f..09afffd 100644
--- a/src/runtime/cpu/operators/CpuQuantization.h
+++ b/src/runtime/cpu/operators/CpuQuantize.h
@@ -21,41 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_QUANTIZATION_H
-#define ARM_COMPUTE_CPU_QUANTIZATION_H
+#ifndef ARM_COMPUTE_CPU_QUANTIZE_H
+#define ARM_COMPUTE_CPU_QUANTIZE_H
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "src/core/cpu/ICpuKernel.h"
#include "src/runtime/cpu/ICpuOperator.h"
-#include <memory>
-
namespace arm_compute
{
namespace cpu
{
-/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) kernels:
- *
- *
- * -# @ref kernels::CpuQuantizationKernel
- *
- */
-class CpuQuantization : public ICpuOperator
+/** Basic function to run @ref kernels::CpuQuantizeKernel that quantizes an input tensor */
+class CpuQuantize : public ICpuOperator
{
public:
/** Default Constructor */
- CpuQuantization() = default;
+ CpuQuantize() = default;
/** Set the input and output tensors.
*
* @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
* @param[out] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16
*/
- void configure(ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantization
+ void configure(const ITensorInfo *src, ITensorInfo *dst);
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16
+ * Similar to @ref CpuQuantize::configure()
*
* @return a status
*/
@@ -66,4 +55,4 @@
};
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_QUANTIZATION_H */
+#endif /* ARM_COMPUTE_CPU_QUANTIZE_H */
diff --git a/src/runtime/gpu/cl/operators/ClDequantization.cpp b/src/runtime/gpu/cl/operators/ClDequantization.cpp
deleted file mode 100644
index df3203d..0000000
--- a/src/runtime/gpu/cl/operators/ClDequantization.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/runtime/gpu/cl/operators/ClDequantization.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/kernels/ClDequantizationKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-void ClDequantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
-{
- auto k = std::make_unique<kernels::ClDequantizationKernel>();
- k->configure(compile_context, src, dst);
- _kernel = std::move(k);
-}
-
-Status ClDequantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
-{
- return kernels::ClDequantizationKernel::validate(src, dst);
-}
-
-void ClDequantization::run(ITensorPack &tensors)
-{
- ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
- CLScheduler::get().enqueue_op(*_kernel.get(), tensors);
-}
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClQuantization.cpp b/src/runtime/gpu/cl/operators/ClDequantize.cpp
similarity index 76%
copy from src/runtime/gpu/cl/operators/ClQuantization.cpp
copy to src/runtime/gpu/cl/operators/ClDequantize.cpp
index 2e753b5..0c1391b 100644
--- a/src/runtime/gpu/cl/operators/ClQuantization.cpp
+++ b/src/runtime/gpu/cl/operators/ClDequantize.cpp
@@ -21,30 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/gpu/cl/operators/ClQuantization.h"
+#include "src/runtime/gpu/cl/operators/ClDequantize.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h"
namespace arm_compute
{
namespace opencl
{
-void ClQuantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClDequantize::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
{
- auto k = std::make_unique<kernels::ClQuantizationKernel>();
+ auto k = std::make_unique<kernels::ClDequantizeKernel>();
k->configure(compile_context, src, dst);
_kernel = std::move(k);
}
-Status ClQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClDequantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- return kernels::ClQuantizationKernel::validate(src, dst);
+ return kernels::ClDequantizeKernel::validate(src, dst);
}
-void ClQuantization::run(ITensorPack &tensors)
+void ClDequantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
CLScheduler::get().enqueue_op(*_kernel.get(), tensors);
diff --git a/src/runtime/gpu/cl/operators/ClDequantization.h b/src/runtime/gpu/cl/operators/ClDequantize.h
similarity index 77%
rename from src/runtime/gpu/cl/operators/ClDequantization.h
rename to src/runtime/gpu/cl/operators/ClDequantize.h
index a696b73..47fad3e 100644
--- a/src/runtime/gpu/cl/operators/ClDequantization.h
+++ b/src/runtime/gpu/cl/operators/ClDequantize.h
@@ -21,10 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_DEQUANTIZATION_H
-#define ARM_COMPUTE_CL_DEQUANTIZATION_H
+#ifndef ARM_COMPUTE_CL_DEQUANTIZE_H
+#define ARM_COMPUTE_CL_DEQUANTIZE_H
-#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/gpu/cl/ClCompileContext.h"
#include "src/runtime/gpu/cl/IClOperator.h"
@@ -32,12 +31,12 @@
{
namespace opencl
{
-/** Basic function to run @ref kernels::ClDequantizationKernel that dequantizes an input tensor */
-class ClDequantization : public IClOperator
+/** Basic function to run @ref kernels::ClDequantizeKernel that dequantizes an input tensor */
+class ClDequantize : public IClOperator
{
public:
/** Constructor */
- ClDequantization() = default;
+ ClDequantize() = default;
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -45,10 +44,9 @@
* @param[out] dst Destination tensor info with the same dimensions of @p src. Data type supported: F16/F32.
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[in] dst Output tensor info. Data type supported: F16/F32.
+ * Similar to @ref ClDequantize::configure()
*
* @return a status
*/
@@ -59,4 +57,4 @@
};
} // namespace opencl
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_DEQUANTIZATION_H */
+#endif /* ARM_COMPUTE_CL_DEQUANTIZE_H */
diff --git a/src/runtime/gpu/cl/operators/ClQuantization.cpp b/src/runtime/gpu/cl/operators/ClQuantize.cpp
similarity index 76%
rename from src/runtime/gpu/cl/operators/ClQuantization.cpp
rename to src/runtime/gpu/cl/operators/ClQuantize.cpp
index 2e753b5..92bbb62 100644
--- a/src/runtime/gpu/cl/operators/ClQuantization.cpp
+++ b/src/runtime/gpu/cl/operators/ClQuantize.cpp
@@ -21,30 +21,30 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/runtime/gpu/cl/operators/ClQuantization.h"
+#include "src/runtime/gpu/cl/operators/ClQuantize.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h"
+#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h"
namespace arm_compute
{
namespace opencl
{
-void ClQuantization::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
+void ClQuantize::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
{
- auto k = std::make_unique<kernels::ClQuantizationKernel>();
+ auto k = std::make_unique<kernels::ClQuantizeKernel>();
k->configure(compile_context, src, dst);
_kernel = std::move(k);
}
-Status ClQuantization::validate(const ITensorInfo *src, const ITensorInfo *dst)
+Status ClQuantize::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- return kernels::ClQuantizationKernel::validate(src, dst);
+ return kernels::ClQuantizeKernel::validate(src, dst);
}
-void ClQuantization::run(ITensorPack &tensors)
+void ClQuantize::run(ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
CLScheduler::get().enqueue_op(*_kernel.get(), tensors);
diff --git a/src/runtime/gpu/cl/operators/ClQuantization.h b/src/runtime/gpu/cl/operators/ClQuantize.h
similarity index 75%
rename from src/runtime/gpu/cl/operators/ClQuantization.h
rename to src/runtime/gpu/cl/operators/ClQuantize.h
index d938ff9..0b6d2c8 100644
--- a/src/runtime/gpu/cl/operators/ClQuantization.h
+++ b/src/runtime/gpu/cl/operators/ClQuantize.h
@@ -21,10 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CL_QUANTIZATION_H
-#define ARM_COMPUTE_CL_QUANTIZATION_H
+#ifndef ARM_COMPUTE_CL_QUANTIZE_H
+#define ARM_COMPUTE_CL_QUANTIZE_H
-#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/gpu/cl/ClCompileContext.h"
#include "src/runtime/gpu/cl/IClOperator.h"
@@ -32,15 +31,12 @@
{
namespace opencl
{
-/** Basic function to quantize a tensor. This function calls the following OpenCL kernel:
- *
- * -# @ref kernels::ClQuantizationKernel
- */
-class ClQuantization : public IClOperator
+/** Basic function to run @ref kernels::ClQuantizeKernel that quantizes an input tensor */
+class ClQuantize : public IClOperator
{
public:
/** Constructor */
- ClQuantization() = default;
+ ClQuantize() = default;
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
@@ -50,10 +46,9 @@
* @note Output auto initialization is not supported by this function
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer
+ /** Static function to check if given info will lead to a valid configuration
*
- * @param[in] src Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/32.
- * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
+ * Similar to @ref ClQuantize::configure()
*
* @return a status
*/
@@ -63,5 +58,5 @@
void run(ITensorPack &tensors) override;
};
} // namespace opencl
-} //namespace arm_compute
-#endif /* ARM_COMPUTE_CL_QUANTIZATION_H */
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_QUANTIZE_H */