//
// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Graph.hpp>
#include <Network.hpp>

#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTensorHandleFactory.hpp>

#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <GraphUtils.hpp>
#include <arm_compute/runtime/Allocator.h>
#include <CommonTestUtils.hpp>

#include <doctest/doctest.h>
#include <armnn/utility/Assert.hpp>
TEST_SUITE("NeonTensorHandleTests")
{
using namespace armnn;

Sadik Armagan1625efc2021-06-10 18:24:34 +010025TEST_CASE("NeonTensorHandleGetCapabilitiesNoPadding")
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010026{
27 std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
28 NeonTensorHandleFactory handleFactory(memoryManager);
29
30 INetworkPtr network(INetwork::Create());
31
32 // Add the layers
33 IConnectableLayer* input = network->AddInputLayer(0);
34 SoftmaxDescriptor descriptor;
35 descriptor.m_Beta = 1.0f;
36 IConnectableLayer* softmax = network->AddSoftmaxLayer(descriptor);
37 IConnectableLayer* output = network->AddOutputLayer(2);
38
39 // Establish connections
40 input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
41 softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
42
43 // No padding required for input
44 std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
45 softmax,
46 CapabilityClass::PaddingRequired);
Sadik Armagan1625efc2021-06-10 18:24:34 +010047 CHECK(capabilities.empty());
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010048
49 // No padding required for Softmax
50 capabilities = handleFactory.GetCapabilities(softmax, output, CapabilityClass::PaddingRequired);
Sadik Armagan1625efc2021-06-10 18:24:34 +010051 CHECK(capabilities.empty());
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010052
53 // No padding required for output
54 capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
Sadik Armagan1625efc2021-06-10 18:24:34 +010055 CHECK(capabilities.empty());
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010056}
57
Sadik Armagan1625efc2021-06-10 18:24:34 +010058TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010059{
60 std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
61 NeonTensorHandleFactory handleFactory(memoryManager);
62
63 INetworkPtr network(INetwork::Create());
64
65 // Add the layers
66 IConnectableLayer* input = network->AddInputLayer(0);
67 Pooling2dDescriptor descriptor;
68 IConnectableLayer* pooling = network->AddPooling2dLayer(descriptor);
69 IConnectableLayer* output = network->AddOutputLayer(2);
70
71 // Establish connections
72 input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
73 pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
74
75 // No padding required for input
76 std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
77 pooling,
78 CapabilityClass::PaddingRequired);
Sadik Armagan1625efc2021-06-10 18:24:34 +010079 CHECK(capabilities.empty());
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010080
81 // No padding required for output
82 capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
Sadik Armagan1625efc2021-06-10 18:24:34 +010083 CHECK(capabilities.empty());
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010084
85 // Padding required for Pooling2d
86 capabilities = handleFactory.GetCapabilities(pooling, output, CapabilityClass::PaddingRequired);
Sadik Armagan1625efc2021-06-10 18:24:34 +010087 CHECK(capabilities.size() == 1);
88 CHECK((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
89 CHECK(capabilities[0].m_Value);
Narumol Prangnawarat1a268962020-07-27 15:52:13 +010090}
91
Sadik Armagan1625efc2021-06-10 18:24:34 +010092TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +010093{
94 std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
95 std::make_unique<arm_compute::Allocator>(),
96 BaseMemoryManager::MemoryAffinity::Offset);
97 NeonTensorHandleFactory handleFactory(memoryManager);
98 TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
99
100 // create TensorHandle with memory managed
101 auto handle = handleFactory.CreateTensorHandle(info, true);
102 handle->Manage();
103 handle->Allocate();
104
105 memoryManager->Acquire();
106 {
107 float* buffer = reinterpret_cast<float*>(handle->Map());
Sadik Armagan1625efc2021-06-10 18:24:34 +0100108 CHECK(buffer != nullptr); // Yields a valid pointer
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100109 buffer[0] = 1.5f;
110 buffer[1] = 2.5f;
Sadik Armagan1625efc2021-06-10 18:24:34 +0100111 CHECK(buffer[0] == 1.5f); // Memory is writable and readable
112 CHECK(buffer[1] == 2.5f); // Memory is writable and readable
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100113 }
114 memoryManager->Release();
115
116 memoryManager->Acquire();
117 {
118 float* buffer = reinterpret_cast<float*>(handle->Map());
Sadik Armagan1625efc2021-06-10 18:24:34 +0100119 CHECK(buffer != nullptr); // Yields a valid pointer
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100120 buffer[0] = 3.5f;
121 buffer[1] = 4.5f;
Sadik Armagan1625efc2021-06-10 18:24:34 +0100122 CHECK(buffer[0] == 3.5f); // Memory is writable and readable
123 CHECK(buffer[1] == 4.5f); // Memory is writable and readable
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100124 }
125 memoryManager->Release();
126
127 float testPtr[2] = { 2.5f, 5.5f };
128 // Cannot import as import is disabled
Sadik Armagan1625efc2021-06-10 18:24:34 +0100129 CHECK_THROWS_AS(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100130}
131
Sadik Armagan1625efc2021-06-10 18:24:34 +0100132TEST_CASE("NeonTensorHandleFactoryImport")
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100133{
134 std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
135 std::make_unique<arm_compute::Allocator>(),
136 BaseMemoryManager::MemoryAffinity::Offset);
137 NeonTensorHandleFactory handleFactory(memoryManager);
138 TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
139
140 // create TensorHandle without memory managed
141 auto handle = handleFactory.CreateTensorHandle(info, false);
142 handle->Manage();
143 handle->Allocate();
144 memoryManager->Acquire();
145
146 // No buffer allocated when import is enabled
Sadik Armagan1625efc2021-06-10 18:24:34 +0100147 CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100148
149 float testPtr[2] = { 2.5f, 5.5f };
150 // Correctly import
Sadik Armagan1625efc2021-06-10 18:24:34 +0100151 CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100152 float* buffer = reinterpret_cast<float*>(handle->Map());
Sadik Armagan1625efc2021-06-10 18:24:34 +0100153 CHECK(buffer != nullptr); // Yields a valid pointer after import
154 CHECK(buffer == testPtr); // buffer is pointing to testPtr
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100155 // Memory is writable and readable with correct value
Sadik Armagan1625efc2021-06-10 18:24:34 +0100156 CHECK(buffer[0] == 2.5f);
157 CHECK(buffer[1] == 5.5f);
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100158 buffer[0] = 3.5f;
159 buffer[1] = 10.0f;
Sadik Armagan1625efc2021-06-10 18:24:34 +0100160 CHECK(buffer[0] == 3.5f);
161 CHECK(buffer[1] == 10.0f);
Narumol Prangnawaratb8d771a2020-08-14 11:51:12 +0100162 memoryManager->Release();
163}
164
David Monahan0fa10502022-01-13 10:48:33 +0000165TEST_CASE("NeonTensorHandleCanBeImported")
166{
167 std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
168 std::make_unique<arm_compute::Allocator>(),
169 BaseMemoryManager::MemoryAffinity::Offset);
170 NeonTensorHandleFactory handleFactory(memoryManager);
171 TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
172
173 // create TensorHandle (Memory Managed status is irrelevant)
174 auto handle = handleFactory.CreateTensorHandle(info, false);
175
176 // Create an aligned buffer
177 float alignedBuffer[2] = { 2.5f, 5.5f };
178 // Check aligned buffers return true
179 CHECK(handle->CanBeImported(&alignedBuffer, MemorySource::Malloc) == true);
180
181 // Create a misaligned buffer from the aligned one
182 float* misalignedBuffer = reinterpret_cast<float*>(reinterpret_cast<char*>(alignedBuffer) + 1);
183 // Check misaligned buffers return false
184 CHECK(handle->CanBeImported(static_cast<void*>(misalignedBuffer), MemorySource::Malloc) == false);
185}
186
Sadik Armagan1625efc2021-06-10 18:24:34 +0100187TEST_CASE("NeonTensorHandleSupportsInPlaceComputation")
Sadik Armaganab3bd4d2020-08-25 11:48:00 +0100188{
189 std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
190 NeonTensorHandleFactory handleFactory(memoryManager);
191
192 // NeonTensorHandleFactory supports InPlaceComputation
193 ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
194}

}