//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
John Mcloughlin083586d2023-04-28 18:36:52 +01005#pragma once
6
7#include <OpaqueDelegateUtils.hpp>
8
9namespace armnnOpaqueDelegate
10{
11
12TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
13 TfLiteOpaqueContext* tfLiteContext,
14 TfLiteOpaqueNode* tfLiteNode,
15 int nodeIndex,
16 int32_t reduceOperatorCode)
17{
18 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
19 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
20
21 // Gather input indices and use to get input tensor.
22 auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
23 const int* inputTensors;
24 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
25 {
26 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
27 tfLiteContext,
28 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
29 nodeIndex);
30 return kTfLiteError;
31 }
32
33 const TfLiteOpaqueTensor* tfLiteInputTensor =
34 TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
35 if (!IsValid(tfLiteContext, tfLiteInputTensor, reduceOperatorCode, nodeIndex))
36 {
37 return kTfLiteError;
38 }
39
40 const TfLiteOpaqueTensor* tfLiteAxisTensor =
41 TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
42 if (!IsValid(tfLiteContext, tfLiteAxisTensor, reduceOperatorCode, nodeIndex))
43 {
44 return kTfLiteError;
45 }
46
47 // Gather output indices and use to get output tensors.
48 int numOutputs = 0;
49 const int* outputTensors;
50 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
51 {
52 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
53 tfLiteContext,
54 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
55 nodeIndex);
56 return kTfLiteError;
57 }
58
59 const TfLiteOpaqueTensor* tfLiteOutputTensor =
60 TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
61 if (!IsValid(tfLiteContext, tfLiteOutputTensor, reduceOperatorCode, nodeIndex))
62 {
63 return kTfLiteError;
64 }
65
66 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
67 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
68
69 // Get const axis value from model and set it to descriptor.
70 const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor);
71 auto* axisTensorData = static_cast<int*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
72
73 std::vector<int32_t> axis;
74 // Add axis data to vector to be converter to unsigned int and assigned to descriptor axis.
75 if (axisTensorData != nullptr)
76 {
77 for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
78 {
79 axis.emplace_back(axisTensorData[i]);
80 }
81 }
82 else
83 {
84 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
85 {
86 axis.push_back(i);
87 }
88 }
89
90 // Convert the axis to unsigned int and remove duplicates.
91 unsigned int rank = inputTensorInfo.GetNumDimensions();
92 std::set<unsigned int> uniqueAxis;
93 std::transform(axis.begin(),
94 axis.end(),
95 std::inserter(uniqueAxis, uniqueAxis.begin()),
96 [rank](int i)->unsigned int{ return (i + rank) % rank; });
97
98 armnn::ReduceDescriptor desc;
99 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
100
101 auto* reducerParameters = reinterpret_cast<TfLiteReducerParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
102 desc.m_KeepDims = reducerParameters->keep_dims;
103 if (reduceOperatorCode == kTfLiteBuiltinReduceMax)
104 {
105 desc.m_ReduceOperation = armnn::ReduceOperation::Max;
106 }
107 else if (reduceOperatorCode == kTfLiteBuiltinReduceMin)
108 {
109 desc.m_ReduceOperation = armnn::ReduceOperation::Min;
110 }
111 else if (reduceOperatorCode == kTfLiteBuiltinSum)
112 {
113 desc.m_ReduceOperation = armnn::ReduceOperation::Sum;
114 }
115 else if (reduceOperatorCode == kTfLiteBuiltinReduceProd)
116 {
117 desc.m_ReduceOperation = armnn::ReduceOperation::Prod;
118 }
119 else
120 {
121 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
122 tfLiteContext,
123 "TfLiteArmnnOpaqueDelegate: Unsupported Reduction Operator #%d node #%d: ",
124 reduceOperatorCode, nodeIndex);
125 return kTfLiteError;
126 }
127
128 bool isSupported = false;
129 armnn::BackendId setBackend;
130 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
131 {
132 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("REDUCE",
133 tfLiteContext,
134 IsReduceSupported,
135 delegateData.m_Backends,
136 isSupported,
137 setBackend,
138 inputTensorInfo,
139 outInfo,
140 desc);
141 };
142
143 if (!delegateData.m_Network)
144 {
145 validateFunc(outputTensorInfo, isSupported);
146 return isSupported ? kTfLiteOk : kTfLiteError;
147 }
148
149 // Add an Reduce layer
Mike Kellya2806502023-08-03 10:42:11 +0100150 auto layerName = GetName(armnn::LayerType::Reduce, nodeIndex);
151 armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc, layerName.c_str());
John Mcloughlin083586d2023-04-28 18:36:52 +0100152 layer->SetBackendId(setBackend);
153 ARMNN_ASSERT(layer != nullptr);
154
155 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
156 outputSlot.SetTensorInfo(outputTensorInfo);
157
158 // try to connect the Constant Inputs if there are any
Mike Kellya2806502023-08-03 10:42:11 +0100159 if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
John Mcloughlin083586d2023-04-28 18:36:52 +0100160 {
161 return kTfLiteError;
162 }
163
164 // Connect
165 return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
166}
167
168} // namespace armnnOpaqueDelegate