//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Copyright © 2020, 2023 The TensorFlow Authors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//

#include <limits>
#include <numeric>
#include "ResizeOperator.hpp"

// This function is paraphrased from:
// tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc, function convertResizeOp
// tensorflow/lite/kernels/internal/reference/resize_utils.h
TosaSerializationBasicBlock* ConvertResizeToTosaOperator(const Layer* layer,
                                                         const std::vector<const TensorInfo*>& inputs,
                                                         const std::vector<const TensorInfo*>& outputs,
                                                         const ResizeDescriptor* resizeDescriptor)
{
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( inputs.size() == 1,
                                         "ConvertResizeToTosaOperator: Resize must have only one input." );
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( resizeDescriptor->m_DataLayout == DataLayout::NHWC,
                                         "ConvertResizeToTosaOperator: NCHW not supported.");

    ResizeMode mode;
    if (resizeDescriptor->m_Method == ResizeMethod::NearestNeighbor)
    {
        mode = tosa::ResizeMode_NEAREST;
    }
    else if (resizeDescriptor->m_Method == ResizeMethod::Bilinear)
    {
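        // The TOSA mode is selected here, but the Bilinear legalization is not implemented yet,
        // so the descriptor is rejected below.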
        mode = tosa::ResizeMode_BILINEAR;
        throw armnn::InvalidArgumentException("ConvertResizeToTosaOperator: Unimplemented Resize method.");
    }
    else
    {
        throw armnn::InvalidArgumentException("ConvertResizeToTosaOperator: Unsupported Resize method.");
    }

    std::string inputName = std::string("input0_");
    std::string outputName = std::string("output0_");
    std::string blockName = std::string("Op_RESIZE_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so the input and output names need to be
    // determined using the previous and following layers so that the graph is connected correctly.
    // For validation this doesn't matter.
    if(layer != nullptr)
    {
        // Get the layer connected to the input slot and determine a unique input tensor name.
        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
        inputName = GenerateUniqueName(connectedLayer, 0);

        // Determine the unique output tensor name.
        outputName = GenerateUniqueOutputName(*layer, 0);
    }

    int32_t inputHeight = static_cast<int32_t>(inputs[0]->GetShape()[1]);
    int32_t inputWidth = static_cast<int32_t>(inputs[0]->GetShape()[2]);

    int32_t outputHeight = static_cast<int32_t>(resizeDescriptor->m_TargetHeight);
    int32_t outputWidth = static_cast<int32_t>(resizeDescriptor->m_TargetWidth);
    bool alignCorners = resizeDescriptor->m_AlignCorners;
    bool halfPixel = resizeDescriptor->m_HalfPixelCenters;

    // Convert the ArmNN parameters (target output shape, halfPixel and alignCorners)
    // to the TOSA parameters (scale, offset and border).
    // Align corners sets the scaling ratio to (O - 1)/(I - 1) rather than O / I.
    auto preprocessResizeParameters = [&](int inputSize, int outputSize, int& scale_n, int& scale_d, int& offset)
    {
        // The dimension has length 1; we are just sampling from a single value.
        if (inputSize == 1)
        {
            scale_n = outputSize;
            scale_d = 1;
            offset = 0;
            return;
        }

        // Apply align-corners only when requested and when the output dimension is large enough for it.
        // Align corners sets the scaling ratio to (OH - 1)/(IH - 1) rather than OH / IH. Same for width.
        bool applyAligned = alignCorners && (outputSize > 1);
        scale_n = applyAligned ? (outputSize - 1) : outputSize;
        scale_d = applyAligned ? (inputSize - 1) : inputSize;

        // Reduce the ratio by its GCD, then double both terms so that they are guaranteed to be even.
        int gcd = std::gcd(scale_n, scale_d);
        scale_n = 2 * scale_n / gcd;
        scale_d = 2 * scale_d / gcd;

        // If half pixel centers are used then the input and output sampling positions are offset by 1/2 pixel.
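        // (Sketch of the derivation, for reference: half-pixel centres map output index oy to input coordinate
        // (oy + 0.5) * scale_d / scale_n - 0.5 = (oy * scale_d + scale_d/2 - scale_n/2) / scale_n, so the
        // rational offset is scale_d/2 - scale_n/2, which is exact because both terms are even at this point.)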
        offset = halfPixel ? (scale_d / 2 - scale_n / 2) : 0;

        // Reduce the scaling ratio where possible; scale_n and scale_d are known to be even at this point.
        if ((offset & 1) == 0)
        {
            scale_n /= 2;
            scale_d /= 2;
            offset /= 2;
        }
    };

    int scale_y_n, scale_y_d, offset_y;
    int scale_x_n, scale_x_d, offset_x;
    preprocessResizeParameters(inputHeight, outputHeight, scale_y_n, scale_y_d, offset_y);
    preprocessResizeParameters(inputWidth, outputWidth, scale_x_n, scale_x_d, offset_x);

    int border_y = scale_y_d * (outputHeight - 1) - scale_y_n * (inputHeight - 1) + offset_y;
    int border_x = scale_x_d * (outputWidth - 1) - scale_x_n * (inputWidth - 1) + offset_x;
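
    // Roughly speaking, border measures (in units of 1/scale_n of a pixel) how far the last output sample
    // position lands past the last input sample. Worked example (illustrative only): resizing a 3x3 input to
    // 6x6 with halfPixel=true and alignCorners=false gives scale 6/3 -> reduced and doubled to 4/2,
    // offset = 2/2 - 4/2 = -1 (odd, so no final halving), and border = 2*(6-1) - 4*(3-1) + (-1) = 1.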

    // [scale_y_n, scale_y_d, scale_x_n, scale_x_d]
    std::vector<int16_t> scale = { static_cast<int16_t>(scale_y_n),
                                   static_cast<int16_t>(scale_y_d),
                                   static_cast<int16_t>(scale_x_n),
                                   static_cast<int16_t>(scale_x_d) };

    // [offset_y, offset_x]
    std::vector<int16_t> offset = { static_cast<int16_t>(offset_y),
                                    static_cast<int16_t>(offset_x) };

    // [border_y, border_x]
    std::vector<int16_t> border = { static_cast<int16_t>(border_y),
                                    static_cast<int16_t>(border_x) };

    auto isInt16Range = [](int x)
    {
        return (x <= std::numeric_limits<int16_t>::max()) && (x >= std::numeric_limits<int16_t>::min());
    };
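
    // The TOSA resize attribute carries scale, offset and border as 16-bit values (hence the int16_t vectors
    // and casts above), so for quantized inputs reject any value that cannot be represented in 16 bits.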

    if (inputs[0]->IsQuantized())
    {
        // It is uncommon for these values not to fit within 16 bits; when they don't, the result would not
        // match the TFLite reference in any case.
        if (!isInt16Range(scale_y_n) || !isInt16Range(scale_y_d) ||
            !isInt16Range(scale_x_n) || !isInt16Range(scale_x_d) ||
            !isInt16Range(offset_y) || !isInt16Range(offset_x) ||
            !isInt16Range(border_y) || !isInt16Range(border_x))
        {
            throw armnn::Exception("ConvertResizeToTosaOperator: stride or offset out of 16 bit range");
        }
    }

    TosaResizeAttribute resizeAttribute(scale, offset, border, mode);

    auto* op = new TosaSerializationOperator(Op_RESIZE,
                                             Attribute_ResizeAttribute,
                                             &resizeAttribute,
                                             {inputName},
                                             {outputName});

    std::vector<TosaSerializationTensor*> tensors;

    // Only add an input tensor if the connected layer is an input layer, as intermediate and constant
    // tensors will be created separately. There also can't be duplicate tensors.
    if(inputName.find("input0_") != std::string::npos)
    {
        std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
        DType inputDType = ArmNNToDType(inputs[0]->GetDataType());

        tensors.push_back(new TosaSerializationTensor(inputName, inputShape, inputDType, {}));
    }

    std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType = ArmNNToDType(outputs[0]->GetDataType());

    tensors.push_back(new TosaSerializationTensor(outputName, outputShape, outputDType, {}));

    // operatorInputNames/operatorOutputNames end up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName,      // name
                                           mainName,       // region name
                                           {op},           // operators
                                           tensors,        // tensors
                                           {inputName},    // inputs
                                           {outputName});  // outputs
}