//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2DOperator.hpp"

TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Layer* layer,
                                                                       const std::vector<const TensorInfo*>& inputs,
                                                                       const std::vector<const TensorInfo*>& outputs,
                                                                       const Pooling2dDescriptor* poolDescriptor)
{
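    // Note: ArmNN's IgnoreValue padding method counts padded elements towards the divisor of the
    // average, whereas TOSA AVG_POOL2D excludes its own padding positions from that count. The
    // descriptor padding is therefore applied up front with a zero-filled PAD operator, and
    // AVG_POOL2D then runs with no padding of its own.
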
    std::string padInputName = std::string("input0_");
    std::string padOutputName = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string poolOutputName = std::string("output0_");
    std::string blockName = std::string("Op_AVG_POOL2D_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so the input and output names
    // need to be determined using the previous and following layers so that the graph is connected
    // correctly. For validation this doesn't matter.
    if(layer != nullptr)
    {
        // Get the layer connected to the input slot and determine a unique input tensor name.
        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
        padInputName = GenerateUniqueName(connectedInputLayer, 0);

        // Determine a unique output tensor name.
        poolOutputName = GenerateUniqueOutputName(*layer, 0);
    }

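    // TOSA PAD takes a flat list of (before, after) padding amounts for each dimension of the
    // rank-4 input, so only the two spatial dimensions receive non-zero padding here.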
    std::vector<int> paddings;
    if (poolDescriptor->m_DataLayout == DataLayout::NHWC)
    {
        paddings = {0,
                    0,
                    static_cast<int>(poolDescriptor->m_PadTop),
                    static_cast<int>(poolDescriptor->m_PadBottom),
                    static_cast<int>(poolDescriptor->m_PadLeft),
                    static_cast<int>(poolDescriptor->m_PadRight),
                    0,
                    0
        };
    }
    else
    {
        paddings = {0,
                    0,
                    0,
                    0,
                    static_cast<int>(poolDescriptor->m_PadTop),
                    static_cast<int>(poolDescriptor->m_PadBottom),
                    static_cast<int>(poolDescriptor->m_PadLeft),
                    static_cast<int>(poolDescriptor->m_PadRight)
        };
    }

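    // Pad with a constant value of zero so that the padded elements behave like real
    // zero-valued inputs to the pooling that follows.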
    TosaPadAttribute padAttribute(paddings, 0, 0.0f);
    auto* opPad = new TosaSerializationOperator(Op_PAD,
                                                Attribute_PadAttribute,
                                                &padAttribute,
                                                {padInputName},
                                                {padOutputName});

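    // No further padding is applied inside AVG_POOL2D itself; the kernel and stride are
    // passed in {height, width} / {y, x} order.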
    std::vector<int> pad = {0, 0, 0, 0};
    std::vector<int> kernel = {static_cast<int>(poolDescriptor->m_PoolHeight),
                               static_cast<int>(poolDescriptor->m_PoolWidth)};
    std::vector<int> stride = {static_cast<int>(poolDescriptor->m_StrideY),
                               static_cast<int>(poolDescriptor->m_StrideX)};
    TosaPoolAttribute poolAttribute(pad, kernel, stride, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));

    auto* opPool = new TosaSerializationOperator(Op_AVG_POOL2D,
                                                 Attribute_PoolAttribute,
                                                 &poolAttribute,
                                                 {padOutputName},
                                                 {poolOutputName});

    std::vector<TosaSerializationTensor*> tensors;

    std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
    DType inputDType = ArmNNToDType(inputs[0]->GetDataType());

    // Only add an input tensor if the connected layer is an input layer, as intermediate or
    // constant tensors will be created separately. There also can't be duplicate tensors.
    if(padInputName.find("input0_") != std::string::npos)
    {
        tensors.push_back(new TosaSerializationTensor(padInputName, inputShape, inputDType, {}));
    }

    std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType = ArmNNToDType(outputs[0]->GetDataType());

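    // The intermediate tensor describes the output of Op_PAD: the input shape with the
    // padding amounts added to the two spatial dimensions.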
    std::vector<int32_t> intermediateShape;
    if (poolDescriptor->m_DataLayout == DataLayout::NHWC)
    {
        intermediateShape = {inputShape[0],
                             inputShape[1] + paddings[2] + paddings[3],
                             inputShape[2] + paddings[4] + paddings[5],
                             inputShape[3]};
    }
    else
    {
        intermediateShape = {inputShape[0],
                             inputShape[1],
                             inputShape[2] + paddings[4] + paddings[5],
                             inputShape[3] + paddings[6] + paddings[7]};
    }

    tensors.push_back(new TosaSerializationTensor(padOutputName, intermediateShape, inputDType, {}));
    tensors.push_back(new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {}));

    // operatorInputNames/operatorOutputNames end up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName, // name
                                           mainName, // region name
                                           {opPad, opPool}, // operators
                                           tensors, // tensors
                                           {padInputName}, // inputs
                                           {poolOutputName}); // outputs
}