//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <OpaqueDelegateUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

13namespace armnnOpaqueDelegate
14{
15TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
16 TfLiteOpaqueContext* tfLiteContext,
17 TfLiteOpaqueNode* tfLiteNode,
18 int nodeIndex,
19 int32_t argMinMaxOperatorCode)
20{
21 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
22 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
23
24 // Gather input indices and use to get input tensor.
25 auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
26 const int* inputTensors;
27 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
28 {
29 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
30 tfLiteContext,
31 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
32 nodeIndex);
33 return kTfLiteError;
34 }
35
36 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
37 if (!IsValid(tfLiteContext, tfLiteInputTensor, argMinMaxOperatorCode, nodeIndex))
38 {
39 return kTfLiteError;
40 }
41
42 // Use input indices to get filter tensor.
43 const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
44 if(!IsValid(tfLiteAxisTensor))
45 {
46 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
47 tfLiteContext,
48 "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
49 argMinMaxOperatorCode, nodeIndex);
50 return kTfLiteError;
51 }
52
53 // Gather output indices and use to get output tensors.
54 int numOutputs = 0;
55 const int* outputTensors;
56 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
57 {
58 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
59 tfLiteContext,
60 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
61 nodeIndex);
62 return kTfLiteError;
63 }
64
65 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
John Mcloughlin0422cf22023-04-27 16:55:00 +010066 if (!IsValid(tfLiteContext, tfLiteOutputTensor, argMinMaxOperatorCode, nodeIndex))
John Mcloughlin559d9092023-04-26 20:14:47 +010067 {
68 return kTfLiteError;
69 }
70
71 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
72 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
73
74 // Get const axis value from model and set it to descriptor.
75 if (!IsValid(tfLiteContext, tfLiteAxisTensor, argMinMaxOperatorCode, nodeIndex))
76 {
77 return kTfLiteError;
78 }
79
80 armnn::ArgMinMaxDescriptor desc;
81 auto* axisData = static_cast<int*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
82 // Get the axis value from the input tensor
83 switch (TfLiteOpaqueTensorType(tfLiteAxisTensor))
84 {
85 case kTfLiteInt32:
86 case kTfLiteInt64:
87 desc.m_Axis = axisData[0];
88 break;
89 default:
90 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
91 tfLiteContext,
92 "TfLiteArmnnOpaqueDelegate: Axis value data type is not supported in operator #%d node #%d: ",
93 argMinMaxOperatorCode, nodeIndex);
94 return kTfLiteError;
95 }
96
97 // If output_type is int32 then set Signed32 else Signed64. Default type is Signed64.
98 if (argMinMaxOperatorCode == kTfLiteBuiltinArgMax)
99 {
100 desc.m_Function = armnn::ArgMinMaxFunction::Max;
101 auto* argMaxParameters = reinterpret_cast<TfLiteArgMaxParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
102 if (argMaxParameters->output_type != kTfLiteInt32 && argMaxParameters->output_type != kTfLiteInt64)
103 {
104 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
105 tfLiteContext,
106 "TfLiteArmnnOpaqueDelegate: output_type data type is not supported in operator #%d node #%d: ",
107 argMinMaxOperatorCode, nodeIndex);
108 return kTfLiteError;
109 }
110 }
111 else
112 {
113 desc.m_Function = armnn::ArgMinMaxFunction::Min;
114 auto* argMinParameters = reinterpret_cast<TfLiteArgMinParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
115 if (argMinParameters->output_type != kTfLiteInt32 && argMinParameters->output_type != kTfLiteInt64)
116 {
117 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
118 tfLiteContext,
119 "TfLiteArmnnOpaqueDelegate: output_type data type is not supported in operator #%d node #%d: ",
120 argMinMaxOperatorCode, nodeIndex);
121 return kTfLiteError;
122 }
123 }
124
125 bool isSupported = false;
126 armnn::BackendId setBackend;
127 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
128 {
129 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ARGMINMAX",
130 tfLiteContext,
131 IsArgMinMaxSupported,
132 delegateData.m_Backends,
133 isSupported,
134 setBackend,
135 inputTensorInfo,
136 outInfo,
137 desc);
138 };
139
140 if (!delegateData.m_Network)
141 {
142 validateFunc(outputTensorInfo, isSupported);
143 return isSupported ? kTfLiteOk : kTfLiteError;
144 }
145
146 // Add an ArgMinMax layer
147 armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
148 layer->SetBackendId(setBackend);
149 ARMNN_ASSERT(layer != nullptr);
150
151 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
152 outputSlot.SetTensorInfo(outputTensorInfo);
153
154 // try to connect the Constant Inputs if there are any
155 if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
156 {
157 return kTfLiteError;
158 }
159
160 // Connect
161 return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
162}
163
164}