//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <arm_compute/runtime/CL/CLScheduler.h>
#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>

#include <armnn/utility/PolymorphicDowncast.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>

#include <cl/ClImportTensorHandle.hpp>
#include <cl/ClImportTensorHandleFactory.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClTensorHandleFactory.hpp>
#include <cl/ICLTensorProxy.hpp>
#include <cl/test/ClContextControlFixture.hpp>
#include <cl/test/ClWorkloadFactoryHelper.hpp>

#include <doctest/doctest.h>

#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>

using namespace armnn;

TEST_SUITE("ICLTensorProxyTests")
{

TEST_CASE_FIXTURE(ClContextControlFixture, "ICLTensorProxyTest")
{
    ClTensorHandleFactory handleFactory =
            ClWorkloadFactoryHelper::GetTensorHandleFactory(ClWorkloadFactoryHelper::GetMemoryManager());

    TensorInfo info({ 1, 3, 4, 1 }, DataType::Float32);

    // Create a memory-managed TensorHandle for the input data
    auto handle = handleFactory.CreateTensorHandle(info, true);

    std::vector<float> inputData
    {
        -5, -2, 1, 2,
        3, 10, -20, 8,
        0, -12, 7, -9
    };

    handle->Allocate();

    CopyDataToITensorHandle(handle.get(), inputData.data());

    // Get the CLTensor
    arm_compute::CLTensor& tensor = PolymorphicDowncast<ClTensorHandle*>(handle.get())->GetTensor();
    ICLTensorProxy iclTensorProxy(&tensor);

    // Check that the ICLTensorProxy gets the correct information from the delegate tensor
    CHECK((iclTensorProxy.info() == tensor.info()));
    CHECK((iclTensorProxy.buffer() == tensor.buffer()));
    CHECK((iclTensorProxy.cl_buffer() == tensor.cl_buffer()));
    CHECK((iclTensorProxy.quantization().scale == tensor.quantization().scale));
    CHECK((iclTensorProxy.quantization().offset == tensor.quantization().offset));
}

TEST_CASE_FIXTURE(ClContextControlFixture, "ChangeICLTensorProxyExecutionTest")
{
    // Start execution with a copied tensor
    ClTensorHandleFactory handleFactory =
            ClWorkloadFactoryHelper::GetTensorHandleFactory(ClWorkloadFactoryHelper::GetMemoryManager());

    TensorInfo info({ 1, 3, 4, 1 }, DataType::Float32);
    unsigned int numElements = info.GetNumElements();

    // Create a memory-managed TensorHandle for the initial, copied execution
    auto handle = handleFactory.CreateTensorHandle(info, true);

    std::vector<float> inputData
    {
        -5, -2, 1, 2,
        3, 10, -20, 8,
        0, -12, 7, -9
    };

    std::vector<float> ExpectedOutput
    {
        0, 0, 1, 2,
        3, 10, 0, 8,
        0, 0, 7, 0
    };

    handle->Allocate();

    CopyDataToITensorHandle(handle.get(), inputData.data());

    // Get the CLTensor
    arm_compute::CLTensor& tensor = PolymorphicDowncast<ClTensorHandle*>(handle.get())->GetTensor();

    // Wrap the allocated tensor in a proxy
    std::unique_ptr<ICLTensorProxy> iclTensorProxy;
    iclTensorProxy = std::make_unique<ICLTensorProxy>(&tensor);

    // Create and configure the activation function
    const arm_compute::ActivationLayerInfo act_info(arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
    arm_compute::CLActivationLayer act_func;
    act_func.configure(iclTensorProxy.get(), nullptr, act_info);
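    // Note: with a nullptr output tensor, CLActivationLayer computes the activation in place on the proxied tensor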

    act_func.run();
    arm_compute::CLScheduler::get().sync();

    std::vector<float> actualOutput(info.GetNumElements());

    CopyDataFromITensorHandle(actualOutput.data(), handle.get());

    // Validate the result against the expected output
    for(unsigned int i = 0; i < numElements; ++i)
    {
        CHECK((actualOutput[i] == ExpectedOutput[i]));
    }

    // Change to execute with an imported tensor
    ClImportTensorHandleFactory importHandleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
                                                    static_cast<MemorySourceFlags>(MemorySource::Malloc));
    // Create a TensorHandle for memory import
    auto importHandle = importHandleFactory.CreateTensorHandle(info);

    // Get the CLTensor
    arm_compute::CLTensor& importTensor = PolymorphicDowncast<ClImportTensorHandle*>(importHandle.get())->GetTensor();

    // Allocate user memory
    const size_t totalBytes = importTensor.info()->total_size();
    const size_t alignment =
            arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
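    // Over-allocate so that std::align can always find a cache-line-aligned region of totalBytes within the buffer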
    size_t space = totalBytes + alignment + alignment;
    auto testData = std::make_unique<uint8_t[]>(space);
    void* alignedPtr = testData.get();
    CHECK(std::align(alignment, totalBytes, alignedPtr, space));

    // Import the user-allocated memory
    CHECK(importHandle->Import(alignedPtr, armnn::MemorySource::Malloc));

    // Fill the imported buffer with negative values
    auto* typedPtr = reinterpret_cast<float*>(alignedPtr);
    std::fill_n(typedPtr, numElements, -5.0f);

    // Point the proxy at the imported tensor so the CLActivationLayer uses it without calling configure() again
    iclTensorProxy->set(&importTensor);

    // Execute the function again and sync
    act_func.run();
    arm_compute::CLScheduler::get().sync();

    // Validate the result by checking that the output has no negative values
    for(unsigned int i = 0; i < numElements; ++i)
    {
        CHECK(typedPtr[i] == 0);
    }
}
}