# SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Functions used to read from a TensorFlow Lite format file.
import os.path
import struct
import sys

import numpy as np

from .errors import InputFileError
from .nn_graph import Graph
from .nn_graph import Subgraph
from .operation import create_activation_function
from .operation import Op
from .operation import Operation
from .reader_util import align_tensor_indices_to_nng
from .reader_util import clone_and_reshape_tensor
from .reader_util import decode_str
from .reader_util import fixup_tensors
from .tensor import create_virtual_tensor
from .tensor import QuantizationParameters
from .tensor import Tensor
from .tflite.BuiltinOperator import BuiltinOperator
from .tflite.Model import Model
from .tflite_mapping import builtin_operator_map
from .tflite_mapping import DataType
from .tflite_mapping import datatype_map
from .tflite_mapping import datatype_map_numpy
from .tflite_mapping import optype_to_builtintype


class TFLiteSubgraph:
    def __init__(self, graph, subgraph):
        self.graph = graph
        self.name = decode_str(subgraph.Name())

        self.tensors = []
        for idx in range(subgraph.TensorsLength()):
            self.tensors.append(self.parse_tensor(subgraph.Tensors(idx)))

        self.virtual_outputs = []
        for idx in range(subgraph.OperatorsLength()):
            self.parse_operator(idx, subgraph.Operators(idx))

        self.outputs = self.get_tensors_from_indices_remove_duplicates(subgraph.OutputsAsNumpy(), "output")
        self.inputs = self.get_tensors_from_indices_remove_duplicates(subgraph.InputsAsNumpy(), "input")
        fixup_tensors(self.inputs, self.tensors)

        self.outputs.extend(self.virtual_outputs)

    def get_tensors_from_indices_remove_duplicates(self, indices, warning_str):
        tensors = []
        for idx in indices:
            tensor = self.tensors[idx]
            if tensor not in tensors:
                tensors.append(tensor)
            else:
                print(
                    "Warning: Subgraph {0} tensor ({1}) with idx = {2} already seen. Removing the duplicate.".format(
                        warning_str, tensor, idx
                    )
                )

        return tensors

    def parse_tensor(self, tens_data):
        np_shape = tens_data.ShapeAsNumpy()
        shape = list(np_shape) if type(np_shape) is np.ndarray else []
        name = decode_str(tens_data.Name())
        tens_dtype = tens_data.Type()
        dtype = datatype_map[tens_dtype]
        tens = Tensor(shape, dtype, name)
        quant = tens_data.Quantization()
        tens.is_variable = tens_data.IsVariable()

        tens.quantization = QuantizationParameters()
        if quant is not None:
            tens.quantization.min = self.len1_array_to_scalar(quant.MinAsNumpy())
            tens.quantization.max = self.len1_array_to_scalar(quant.MaxAsNumpy())
            tens.quantization.scale_f32 = self.len1_array_to_scalar(quant.ScaleAsNumpy())
            tens.quantization.zero_point = self.len1_array_to_scalar(quant.ZeroPointAsNumpy())
            tens.quantization.quant_dim = quant.QuantizedDimension()

        if dtype == DataType.uint8:
            tens.quantization.quant_min = 0
            tens.quantization.quant_max = (1 << dtype.bits) - 1
        elif dtype in (DataType.int8, DataType.int16, DataType.int32, DataType.int64):
            tens.quantization.quant_min = -(1 << (dtype.bits - 1))
            tens.quantization.quant_max = (1 << (dtype.bits - 1)) - 1
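        # e.g. uint8 gives quant_min/quant_max = 0/255 and int8 gives -128/127,
        # i.e. the standard unsigned/two's-complement range for each bit width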

        if tens.quantization.scale_f32 is None and tens.quantization.zero_point is None:
            tens.quantization = None

        tens.values = None
        buf = self.graph.buffers[tens_data.Buffer()]
        if buf is not None:
            np_dtype = datatype_map_numpy[tens_dtype]
            if dtype == DataType.string:
                tens.values = np.array(buf.view(np_dtype))
            else:
                tens.values = np.array(buf.view(np_dtype).reshape(shape))
        return tens

    def parse_operator(self, op_index, op_data):
        op_type, opt_serializer, custom_code, indices, version = self.graph.operator_codes[op_data.OpcodeIndex()]
        inputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.InputsAsNumpy()]
        outputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.OutputsAsNumpy()]
        intermediates = []
        if op_data.IntermediatesLength():
            intermediates = [self.tensors[idx] if idx != -1 else None for idx in op_data.IntermediatesAsNumpy()]

        name = "unknown_op_name"
        if len(outputs):
            name = outputs[0].name
        inputs = align_tensor_indices_to_nng(op_type, indices, inputs)
        op = Operation(op_type, name)
        op.op_index = op_index
        op.version = version
        op.inputs = inputs
        op.outputs = outputs
        op.intermediates = intermediates
        for out in op.outputs:
            out.ops = [op]

        if op_type in (Op.AssignVariable, Op.CallOnce):
            # All graph traversals are depth-first, starting from the subgraph
            # output tensors. Because of this, operators like AssignVariable
            # and CallOnce are never visited when the graph is traversed, so
            # these ops are never handled. Fixing that properly would require
            # changes in several places in the code base. Until then this
            # workaround is applied: a virtual output is added both to the
            # operator and to the subgraph, so that the full graph is traversed
            # correctly. The tensor is not used for anything else.
            op.name = f"{op_type}_{op_index}"
            tens = create_virtual_tensor(op.name)
            op.set_output_tensor(tens)
            self.virtual_outputs.append(tens)

        if op.type.is_depthwise_conv2d_op() or op.type.is_conv2d_op() or op.type == Op.FullyConnected:
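            # Layout note (assumed from the TFLite schema): conv weights are stored
            # as OHWI, depthwise weights as [1, H, W, C*M] and fully connected
            # weights as (out, in); the permutations below rearrange them into the
            # internally used order, e.g. (1, 2, 3, 0) turns OHWI into HWIO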
            if inputs[1].values is not None:
                if op.type == Op.FullyConnected:
                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0), False)
                else:
                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0), False)
            if op.type.needs_bias() and len(inputs) <= op_type.info.indices.biases[0]:
                # No Bias tensor
                inputs.append(None)
            if inputs[-1] and inputs[-1].values is not None:
                # Since bias tensor is used for both bias and scale,
                # a clone with a unique equivalence_id is needed.
                inputs[-1] = clone_and_reshape_tensor(inputs[-1], None, True)

        if opt_serializer is not None:
            op.attrs = opt_serializer.deserialize(op_data)

            if op_type == Op.While:
                # Attach the actual nng subgraphs to the op
                cond_subgraph_index = op.attrs["cond_subgraph_index"]
                body_subgraph_index = op.attrs["body_subgraph_index"]
                op.attrs["subgraph"] = (
                    self.graph.nng.subgraphs[cond_subgraph_index],
                    self.graph.nng.subgraphs[body_subgraph_index],
                )
            if op_type == Op.CallOnce:
                # Attach the actual nng subgraphs to the op
                init_subgraph_index = op.attrs["init_subgraph_index"]
                op.attrs["subgraph"] = (self.graph.nng.subgraphs[init_subgraph_index],)

            if op_type == Op.Reshape:
                if "new_shape" in op.attrs["attribute_read_error"] and len(inputs) > 1:
                    # the "new_shape" attribute is optional if the new_shape tensor (inputs[1]) is specified. therefore,
                    # remove the attribute read error
                    op.attrs["attribute_read_error"].remove("new_shape")

            if op_type == Op.Cast:
                # Cast ops should have "in/out_data_type" attribs; add them if missing
                if "in_data_type" not in op.attrs:
                    op.attrs["in_data_type"] = inputs[0].dtype
                if "out_data_type" not in op.attrs:
                    op.attrs["out_data_type"] = outputs[0].dtype

            if "stride_w" in op.attrs:
                op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
            if "filter_width" in op.attrs:
                op.attrs["ksize"] = (1, op.attrs["filter_height"], op.attrs["filter_width"], 1)
            if "dilation_w_factor" in op.attrs:
                op.attrs["dilation"] = (1, op.attrs["dilation_h_factor"], op.attrs["dilation_w_factor"], 1)
            if "depth_multiplier" in op.attrs:
                op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]

            if op_type == Op.DepthwiseConv2DBias and op.attrs["depth_multiplier"] == 0:
                # The depth multiplier is implicit and is calculated as weight channels / ifm channels
                # Note however that the weights have been reshaped above.
                # The original value is cached above in channel_multiplier
                op.attrs["depth_multiplier"] = op.weights.shape[2] // op.ifm.shape[-1]
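                # e.g. with weights reshaped to [H, W, C*M, 1] = [3, 3, 16, 1] and an
                # IFM depth of 8, the implicit depth multiplier is 16 // 8 = 2
                # (illustrative shapes only, not taken from a real network)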

            # The fused_activation_function attribute needs to be retained so that the
            # tflite_writer can correctly pass through operators that run on the CPU.
            # This is because the operator activation attribute is later converted to an
            # NpuActivation which treats None and ReLU the same, thereby making it difficult
            # for the tflite_writer to recover the original activation function.
            faf = op.attrs.get("fused_activation_function", None)
            if faf is not None:
                op.activation = create_activation_function(faf)
            if custom_code is not None:
                op.attrs["custom_code"] = custom_code

            # finally, report any missing attributes that could not be read during deserialize()
            attribute_read_error = op.attrs["attribute_read_error"]
            if len(attribute_read_error) != 0:
                print(
                    f"Warning: Could not read the following attributes from {optype_to_builtintype(op.type)}"
                    f" '{op.name}' {opt_serializer.name} field: {', '.join(attribute_read_error)}"
                )

    @staticmethod
    def len1_array_to_scalar(arr):
        # The following flatbuffer quantisation fields all return a scalar value of 0 if they are not defined in
        # the input buffer. This is represented in Vela by using None.
        # Otherwise, the fields return a single or multi-element array, in which case single-element arrays
        # are converted to scalars.
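        # For example: 0 (field absent) -> None, np.array([0.5]) -> 0.5, and a
        # multi-element array such as np.array([0.5, 0.25]) is returned unchanged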
        if isinstance(arr, int) and arr == 0:
            return None
        if len(arr) == 1:
            return arr[0]
        return arr


class TFLiteGraph:
    def __init__(self, filename, batch_size, feed_dict, output_node_names, initialisation_nodes):

        self.op_times = {}
        if batch_size is None:
            batch_size = 1
        self.batch_size = batch_size
        self.name = os.path.splitext(os.path.basename(filename))[0]
        self.initialisation_nodes = initialisation_nodes

        with open(filename, "rb") as f:
            buf = bytearray(f.read())

        try:
            parsing_step = "parsing root"
            model = Model.GetRootAsModel(buf, 0)

            parsing_step = "parsing buffers length"
            self.buffers = []
            if not model.BuffersIsNone():
                for idx in range(model.BuffersLength()):
                    parsing_step = f"parsing buffer {idx}"
                    buffer = model.Buffers(idx)
                    buffer_data = self.parse_buffer(buffer)
                    # buffers can be either empty, or contain no data (zero length), or contain data (non-zero
                    # length). when a buffer is None it means that it is either empty or zero length, and an
                    # empty buffer will have DataIsNone() equal to true.
                    # we should detect zero length buffers and report a warning because the TFLite semantics for
                    # these types of buffers changed in TensorFlow 2.11, whereby they could result in runtime errors
                    if buffer_data is None and not buffer.DataIsNone():
                        print(
                            f"Warning: Input TensorFlow Lite network contains a zero length buffer (index = {idx})"
                            f" which is semantically not empty. However, it will be treated as an empty buffer."
                        )

                    self.buffers.append(buffer_data)

            parsing_step = "parsing operator codes length"
            self.operator_codes = []
            for idx in range(model.OperatorCodesLength()):
                parsing_step = f"parsing operator code {idx}"
                self.operator_codes.append(self.parse_operator_code(model.OperatorCodes(idx)))

            parsing_step = "parsing subgraphs length"
            self.subgraphs = []

            # Pre-allocate nng subgraphs - needed when parsing an operator that
            # has subgraph attributes.
            self.nng = Graph(self.name, self.batch_size)
            for idx in range(model.SubgraphsLength()):
                sg = Subgraph()
                self.nng.subgraphs.append(sg)

            for idx in range(model.SubgraphsLength()):
                parsing_step = f"parsing subgraph {idx}"
                self.subgraphs.append(TFLiteSubgraph(self, model.Subgraphs(idx)))

            for idx, tflite_sg in enumerate(self.subgraphs):
                sg = self.nng.subgraphs[idx]
                sg.name = tflite_sg.name
                sg.original_inputs = tflite_sg.inputs  # Preserve the original input order
                sg.output_tensors = tflite_sg.outputs
                sg.virtual_outputs = tflite_sg.virtual_outputs

            parsing_step = "parsing metadata length"
            # Preserve the original metadata
            for idx in range(model.MetadataLength()):
                parsing_step = f"parsing metadata {idx}"
                meta = model.Metadata(idx)
                parsing_step = f"parsing metadata name of metadata {idx}"
                name = meta.Name()
                if name is not None:
                    parsing_step = f"parsing metadata {idx} ({name})"
                    buf_data = self.buffers[meta.Buffer()]
                    self.nng.metadata.append((name, buf_data))
        except (struct.error, TypeError, RuntimeError) as e:
            print(f'Error: Invalid tflite file. Got "{e}" while {parsing_step}.')
            sys.exit(1)

    def parse_buffer(self, buf_data):
        if buf_data.DataLength() == 0:
            return None
        data = buf_data.DataAsNumpy()
        return data

    def parse_operator_code(self, code):
        c = code.BuiltinCode()
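        # Backward-compatibility note (assumed from the TFLite schema history):
        # newer schemas store the operator code in the 32-bit BuiltinCode field,
        # whose default is 0, while older writers only populate the deprecated
        # 8-bit field, so fall back to it whenever BuiltinCode reads as 0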
        if c == 0:
            c = code.DeprecatedBuiltinCode()
        if c not in builtin_operator_map:
            raise InputFileError(
                self.name, f"The input file contains operator code '{c}' which is currently not supported"
            )
        op_type, ser, indices = builtin_operator_map[c]
        custom_code = None
        if c == BuiltinOperator.CUSTOM:
            custom_code = decode_str(code.CustomCode())
        return op_type, ser, custom_code, indices, code.Version()


def read_tflite(filename, batch_size, feed_dict, output_node_names, initialisation_nodes):
    tflite_graph = TFLiteGraph(filename, batch_size, feed_dict, output_node_names, initialisation_nodes)
    nng = tflite_graph.nng
    nng.refresh_after_modification()
    return nng
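

# A minimal usage sketch, assuming the module is run from the installed package
# (e.g. python -m ethosu.vela.tflite_reader model.tflite, since the relative
# imports above prevent running this file directly); within Vela the frontend
# driver normally calls read_tflite() itself:
if __name__ == "__main__":
    model_path = sys.argv[1] if len(sys.argv) > 1 else "model.tflite"
    graph = read_tflite(model_path, 1, {}, [], [])
    for subgraph in graph.subgraphs:
        print(f"subgraph '{subgraph.name}': {len(subgraph.output_tensors)} output tensor(s)")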
349 return nng