blob: b3b0720a0eacb7a78cf74be75a5cc9f5f75ec5fd [file] [log] [blame]
Tim Hall79d07d22020-04-27 18:20:16 +01001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Functions used to read from a TensorFlow Lite format file.
Diego Russoea6111a2020-04-14 18:41:58 +010018import os.path
Tim Hall79d07d22020-04-27 18:20:16 +010019
20import numpy as np
Tim Hall79d07d22020-04-27 18:20:16 +010021
Louis Verhaard678645b2020-06-15 15:22:47 +020022from .errors import InputFileError
Tim Hallc8310b12020-06-17 14:53:11 +010023from .errors import TensorError
Diego Russoe8a10452020-04-21 17:39:10 +010024from .nn_graph import Graph
25from .nn_graph import Subgraph
Louis Verhaarde8a5a782020-11-02 18:04:27 +010026from .operation import create_activation_function
Louis Verhaardaee5d752020-09-30 09:01:52 +020027from .operation import Op
Diego Russoea6111a2020-04-14 18:41:58 +010028from .operation import Operation
Diego Russoe8a10452020-04-21 17:39:10 +010029from .tensor import QuantizationParameters
30from .tensor import Tensor
31from .tflite.BuiltinOperator import BuiltinOperator
32from .tflite.Model import Model
33from .tflite_mapping import builtin_operator_map
34from .tflite_mapping import DataType
35from .tflite_mapping import datatype_map
36from .tflite_mapping import datatype_map_numpy
Tim Hall79d07d22020-04-27 18:20:16 +010037
38
def decode_str(s):
    """Decode a flatbuffer byte string as UTF-8, mapping None to the empty string."""
    return "" if s is None else s.decode("utf-8")
43
44
def clone_and_reshape_tensor(src_tens, reorder, set_unique):
    """Clone src_tens and permute its axes according to *reorder*.

    The clone's shape-related fields are the permutation of the source shape,
    and any constant data (values / quant_values) is transposed to match.
    A fresh Const op is attached as the clone's producing operation.
    When set_unique is True the clone gets a unique equivalence id.
    """
    clone = src_tens.clone("_reshape", set_unique)
    permuted = [src_tens.shape[axis] for axis in reorder]
    clone.shape = permuted
    clone.bandwidth_shape = permuted
    clone.storage_shape = permuted

    if clone.values is not None:
        clone.values = clone.values.transpose(reorder)
    if clone.quant_values is not None:
        clone.quant_values = clone.quant_values.transpose(reorder)

    # Give the clone its own constant producer so it is a proper graph node.
    const_op = Operation(Op.Const, clone.name)
    const_op.set_output_tensor(clone)
    return clone
Tim Hall79d07d22020-04-27 18:20:16 +010060
61
class TFLiteSubgraph:
    """One subgraph of a parsed TensorFlow Lite model.

    Builds internal Tensor and Operation objects from the flatbuffer
    subgraph tables and wires up producer/consumer relationships.
    """

    def __init__(self, graph, subgraph):
        # graph: the owning TFLiteGraph (provides buffers and operator_codes).
        # subgraph: the flatbuffer SubGraph table to parse.
        self.graph = graph
        self.name = decode_str(subgraph.Name())

        self.tensors = []
        for idx in range(subgraph.TensorsLength()):
            self.tensors.append(self.parse_tensor(subgraph.Tensors(idx)))

        # Operators must be parsed after all tensors exist, since they
        # reference tensors by index.
        for idx in range(subgraph.OperatorsLength()):
            self.parse_operator(idx, subgraph.Operators(idx))

        self.outputs = self.get_tensors_from_indices_remove_duplicates(subgraph.OutputsAsNumpy(), "output")
        self.inputs = self.get_tensors_from_indices_remove_duplicates(subgraph.InputsAsNumpy(), "input")

        # Fix up tensors without operations. Generate either Placeholder or Constant ops
        for tens in self.inputs:
            if tens.ops != []:
                TensorError(tens, "This subgraph input tensor has unexpected driving operators.")

            op = Operation(Op.Placeholder, tens.name)
            op.set_output_tensor(tens)

        for tens in self.tensors:
            if not tens.ops:
                op = Operation(Op.Const, tens.name)
                op.set_output_tensor(tens)

    def get_tensors_from_indices_remove_duplicates(self, indices, warning_str):
        """Map tensor indices to Tensor objects, dropping duplicates with a warning.

        warning_str names the list being built ("input"/"output") for the message.
        """
        tensors = []
        for idx in indices:
            tensor = self.tensors[idx]
            if tensor not in tensors:
                tensors.append(tensor)
            else:
                print(
                    "Warning: Subgraph {0} tensor ({1}) with idx = {2} already seen. Removing the duplicate.".format(
                        warning_str, tensor, idx
                    )
                )

        return tensors

    def parse_tensor(self, tens_data):
        """Create a Tensor (with quantization and constant data) from a flatbuffer Tensor table."""
        np_shape = tens_data.ShapeAsNumpy()
        # Scalar tensors come back as int 0 rather than an ndarray; use [] for them.
        shape = list(np_shape) if type(np_shape) is np.ndarray else []
        name = decode_str(tens_data.Name())
        dtype = datatype_map[tens_data.Type()]
        tens = Tensor(shape, dtype, name)
        quant = tens_data.Quantization()

        tens.quantization = QuantizationParameters()
        if quant is not None:
            tens.quantization.min = self.len1_array_to_scalar(quant.MinAsNumpy())
            tens.quantization.max = self.len1_array_to_scalar(quant.MaxAsNumpy())
            tens.quantization.scale_f32 = self.len1_array_to_scalar(quant.ScaleAsNumpy())
            tens.quantization.zero_point = self.len1_array_to_scalar(quant.ZeroPointAsNumpy())

        # Representable integer range for the tensor's data type.
        if dtype == DataType.uint8:
            tens.quantization.quant_min = 0
            tens.quantization.quant_max = (1 << dtype.bits) - 1
        elif dtype in set((DataType.int8, DataType.int16, DataType.int32, DataType.int64)):
            tens.quantization.quant_min = -(1 << (dtype.bits - 1))
            tens.quantization.quant_max = (1 << (dtype.bits - 1)) - 1

        # No scale and no zero point means the tensor is not quantized.
        if tens.quantization.scale_f32 is None and tens.quantization.zero_point is None:
            tens.quantization = None

        tens.values = None
        buf = self.graph.buffers[tens_data.Buffer()]
        if buf is not None:
            # Constant tensor: reinterpret the raw buffer with the numpy dtype.
            tens.values = np.array(buf.view(datatype_map_numpy[tens_data.Type()]).reshape(shape))
            if tens.quantization is not None:
                # Keep both the raw quantized values and the dequantized view.
                tens.quant_values = tens.values
                tens.values = tens.quantization.dequantize(tens.quant_values)
        return tens

    def parse_operator(self, op_index, op_data):
        """Create an Operation from a flatbuffer Operator table and connect its tensors."""
        op_type, opt_serializer, custom_code = self.graph.operator_codes[op_data.OpcodeIndex()]
        # Index -1 marks an optional, absent tensor (e.g. missing bias).
        inputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.InputsAsNumpy()]
        outputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.OutputsAsNumpy()]
        name = "unknown_op_name"
        if len(outputs):
            name = outputs[0].name
        op = Operation(op_type, name)
        op.op_index = op_index
        op.inputs = inputs
        op.outputs = outputs
        for out in op.outputs:
            out.ops = [op]

        if op.type.is_depthwise_conv2d_op() or op.type.is_conv2d_op() or op.type == Op.FullyConnected:
            if inputs[1].values is not None:
                # Transpose constant weights from TFLite layout to the internal layout.
                if op.type == Op.FullyConnected:
                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0), False)
                else:
                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0), False)
            if op.type.needs_bias() and len(inputs) <= op_type.info.indices.biases[0]:
                # No Bias tensor
                inputs.append(None)
            if inputs[-1] and inputs[-1].values is not None:
                # Since bias tensor is used for both bias and scale,
                # a clone with a unique equivalence_id is needed
                inputs[-1] = clone_and_reshape_tensor(inputs[-1], (0,), True)

        if opt_serializer is not None:
            op.attrs = opt_serializer.deserialize(op_data)

            if op_type == Op.Reshape and "new_shape" not in op.attrs:
                # Reshape should have an attrib "new_shape" but if it is missing, add it based on the output shape
                op.attrs["new_shape"] = outputs[0].shape

            if op_type == Op.Cast:
                # Cast op should have "in/out_data_type" attribs add if missing
                if "in_data_type" not in op.attrs:
                    op.attrs["in_data_type"] = inputs[0].dtype
                if "out_data_type" not in op.attrs:
                    op.attrs["out_data_type"] = outputs[0].dtype

            # Normalise TFLite per-field attributes into NHWC 4-tuples.
            if "stride_w" in op.attrs:
                op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
            if "filter_width" in op.attrs:
                op.attrs["ksize"] = (1, op.attrs["filter_height"], op.attrs["filter_width"], 1)
            if "dilation_w_factor" in op.attrs:
                op.attrs["dilation"] = (1, op.attrs["dilation_h_factor"], op.attrs["dilation_w_factor"], 1)
            if "depth_multiplier" in op.attrs:
                op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]

            # Fused activation becomes an explicit activation on the op.
            faf = op.attrs.pop("fused_activation_function", None)
            if faf is not None:
                op.activation = create_activation_function(faf)
            if custom_code is not None:
                op.attrs["custom_code"] = custom_code

    @staticmethod
    def len1_array_to_scalar(arr):
        # The following flatbuffer quantisation fields all return a scalar value of 0 if they are not definied in
        # the input buffer. This is represented in Vela by using None.
        # Otherwise, the fields returned are a single or multi-element array. In which case, single element arrays
        # are converted to scalars
        if isinstance(arr, int) and arr == 0:
            return None
        if len(arr) == 1:
            return arr[0]
        return arr
207
Tim Hall79d07d22020-04-27 18:20:16 +0100208
class TFLiteGraph:
    """Reads a .tflite flatbuffer file and builds the internal Graph (nng).

    Parses buffers, operator codes, subgraphs and metadata from the model,
    then creates one internal Subgraph per TFLite subgraph.
    """

    def __init__(
        self, filename, batch_size=1, feed_dict=None, output_node_names=None, initialisation_nodes=None,
    ):
        # NOTE: defaults were the mutable {} / [] anti-pattern; None sentinels
        # are normalised below so callers passing nothing see identical behavior.
        # feed_dict and output_node_names are currently unused here but kept
        # for interface compatibility with the TensorFlow reader path.
        self.op_times = {}
        if batch_size is None:
            batch_size = 1
        self.batch_size = batch_size
        self.name = os.path.splitext(os.path.basename(filename))[0]
        self.initialisation_nodes = initialisation_nodes if initialisation_nodes is not None else []

        with open(filename, "rb") as f:
            buf = bytearray(f.read())

        model = Model.GetRootAsModel(buf, 0)

        # Buffers hold the raw constant data; tensors reference them by index.
        self.buffers = []
        for idx in range(model.BuffersLength()):
            self.buffers.append(self.parse_buffer(model.Buffers(idx)))

        # Operator codes map each operator's opcode index to (type, serializer, custom code).
        self.operator_codes = []
        for idx in range(model.OperatorCodesLength()):
            self.operator_codes.append(self.parse_operator_code(model.OperatorCodes(idx)))

        self.subgraphs = []
        for idx in range(model.SubgraphsLength()):
            self.subgraphs.append(TFLiteSubgraph(self, model.Subgraphs(idx)))

        self.nng = Graph(self.name, self.batch_size)
        for tflite_sg in self.subgraphs:
            sg = Subgraph(tflite_sg.name)
            sg.original_inputs = tflite_sg.inputs  # Preserve the original input order
            sg.output_tensors = tflite_sg.outputs
            self.nng.subgraphs.append(sg)

        # Preserve the original metadata
        for idx in range(model.MetadataLength()):
            meta = model.Metadata(idx)
            name = meta.Name()
            if name is not None:
                buf_data = self.buffers[meta.Buffer()]
                self.nng.metadata.append((name, buf_data))

    def parse_buffer(self, buf_data):
        """Return the buffer contents as a numpy array, or None for an empty buffer."""
        if buf_data.DataLength() == 0:
            return None
        data = buf_data.DataAsNumpy()
        return data

    def parse_operator_code(self, code):
        """Map a flatbuffer OperatorCode to (op type, option serializer, custom code).

        Raises InputFileError for builtin codes Vela does not support.
        """
        c = code.BuiltinCode()
        if c not in builtin_operator_map:
            msg = "The input file contains operator code {} which is currently not supported".format(c)
            raise InputFileError(self.name, msg)
        op_type, ser = builtin_operator_map[c]
        custom_code = None
        if c == BuiltinOperator.CUSTOM:
            custom_code = decode_str(code.CustomCode())
        return op_type, ser, custom_code
Tim Hall79d07d22020-04-27 18:20:16 +0100269
270
def read_tflite(
    filename, batch_size=1, feed_dict=None, output_node_names=None, initialisation_nodes=None,
):
    """Read a TensorFlow Lite file and return the internal network graph (nng).

    filename: path to the .tflite file.
    batch_size: batch size for the graph; None is treated as 1 by TFLiteGraph.
    feed_dict / output_node_names / initialisation_nodes: kept for interface
    parity with the TensorFlow reader; previously mutable {} / [] defaults
    (a shared-state hazard), now None sentinels normalised here.
    """
    feed_dict = feed_dict if feed_dict is not None else {}
    output_node_names = output_node_names if output_node_names is not None else []
    initialisation_nodes = initialisation_nodes if initialisation_nodes is not None else []
    tflite_graph = TFLiteGraph(filename, batch_size, feed_dict, output_node_names, initialisation_nodes)
    nng = tflite_graph.nng
    nng.refresh_after_modification()
    return nng