blob: be47cb13a75035367489c5948766f08d186aa4bd [file] [log] [blame]
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Functions used to read from a TensorFlow Lite format file.
Diego Russoea6111a2020-04-14 18:41:58 +010018import os.path
Tim Hall79d07d22020-04-27 18:20:16 +010019
20import numpy as np
Tim Hall79d07d22020-04-27 18:20:16 +010021
Louis Verhaard678645b2020-06-15 15:22:47 +020022from .errors import InputFileError
Tim Hallc8310b12020-06-17 14:53:11 +010023from .errors import TensorError
Diego Russoe8a10452020-04-21 17:39:10 +010024from .nn_graph import Graph
25from .nn_graph import Subgraph
Diego Russoea6111a2020-04-14 18:41:58 +010026from .operation import Operation
Diego Russoe8a10452020-04-21 17:39:10 +010027from .tensor import QuantizationParameters
28from .tensor import Tensor
29from .tflite.BuiltinOperator import BuiltinOperator
30from .tflite.Model import Model
31from .tflite_mapping import builtin_operator_map
32from .tflite_mapping import DataType
33from .tflite_mapping import datatype_map
34from .tflite_mapping import datatype_map_numpy
Tim Hall79d07d22020-04-27 18:20:16 +010035
36
def decode_str(s):
    """Decode a flatbuffer UTF-8 byte string into a Python str.

    Flatbuffer string accessors return None for absent fields; that maps
    to the empty string.
    """
    return "" if s is None else s.decode("utf-8")
41
42
def clone_and_reshape_tensor(src_tens, reorder):
    """Return a clone of src_tens with its axes permuted according to reorder.

    Any constant payloads (values / quant_values) are transposed to match the
    permuted shape, and the clone is given its own driving Const operation.
    The source tensor is left untouched.
    """
    new_tens = src_tens.clone("_reshape")

    # shape, bandwidth_shape and storage_shape deliberately share one list,
    # exactly as the original assignments did.
    permuted_shape = [src_tens.shape[axis] for axis in reorder]
    new_tens.shape = permuted_shape
    new_tens.bandwidth_shape = permuted_shape
    new_tens.storage_shape = permuted_shape

    # Transpose whichever constant data arrays are present.
    for attr in ("values", "quant_values"):
        data = getattr(new_tens, attr)
        if data is not None:
            setattr(new_tens, attr, data.transpose(reorder))

    # The reshaped tensor is a constant, so give it a Const producer op.
    const_op = Operation("Const", new_tens.name)
    const_op.outputs = [new_tens]
    new_tens.ops = [const_op]

    return new_tens
Tim Hall79d07d22020-04-27 18:20:16 +010061
62
class TFLiteSubgraph:
    """Parses one flatbuffer SubGraph into Vela tensors and operations."""

    def __init__(self, graph, subgraph):
        # graph: the owning TFLiteGraph; provides the parsed buffers and
        # operator_codes that tensors/operators reference by index.
        # subgraph: a flatbuffer SubGraph accessor object.
        self.graph = graph
        self.name = decode_str(subgraph.Name())

        self.tensors = []
        for idx in range(subgraph.TensorsLength()):
            self.tensors.append(self.parse_tensor(subgraph.Tensors(idx)))

        # Operators are parsed after all tensors exist, since they connect
        # to their inputs/outputs by tensor index.
        for idx in range(subgraph.OperatorsLength()):
            self.parse_operator(idx, subgraph.Operators(idx))

        # Duplicate input/output indices are collapsed to a single tensor.
        self.outputs = self.get_tensors_from_indices_remove_duplicates(subgraph.OutputsAsNumpy(), "output")
        self.inputs = self.get_tensors_from_indices_remove_duplicates(subgraph.InputsAsNumpy(), "input")

        # Fix up tensors without operations. Generate either Placeholder or Constant ops
        for tens in self.inputs:
            # A subgraph input must not already have a producing op;
            # TensorError (from .errors) reports this against the tensor.
            if tens.ops != []:
                TensorError(tens, "This subgraph input tensor has unexpected driving operators.")

            op = Operation("Placeholder", tens.name)
            op.outputs = [tens]
            tens.ops = [op]

        # Any remaining producer-less tensor is constant data from the file.
        for tens in self.tensors:
            if not tens.ops:
                op = Operation("Const", tens.name)
                op.outputs = [tens]
                tens.ops = [op]

    def get_tensors_from_indices_remove_duplicates(self, indices, warning_str):
        """Map tensor indices to tensors, dropping duplicates with a warning.

        warning_str ("input"/"output") is only used in the warning message.
        """
        tensors = []
        for idx in indices:
            tensor = self.tensors[idx]
            if tensor not in tensors:
                tensors.append(tensor)
            else:
                print(
                    "Warning: Subgraph {0} tensor ({1}) with idx = {2} already seen. Removing the duplicate.".format(
                        warning_str, tensor, idx
                    )
                )

        return tensors

    def parse_tensor(self, tens_data):
        """Convert one flatbuffer Tensor into a Vela Tensor.

        Fills in shape, data type, quantization parameters and — for
        constant tensors — the (de)quantized values from the model buffer.
        """
        np_shape = tens_data.ShapeAsNumpy()
        # ShapeAsNumpy() returns a scalar 0 (not an ndarray) when the shape
        # field is absent; treat that as an empty (rank-0) shape.
        shape = list(np_shape) if type(np_shape) is np.ndarray else []
        name = decode_str(tens_data.Name())
        dtype = datatype_map[tens_data.Type()]
        tens = Tensor(shape, dtype, name)
        quant = tens_data.Quantization()

        tens.quantization = QuantizationParameters()
        if quant is not None:
            # Absent fields come back as scalar 0; len1_array_to_scalar
            # maps that to None and unwraps single-element arrays.
            tens.quantization.min = self.len1_array_to_scalar(quant.MinAsNumpy())
            tens.quantization.max = self.len1_array_to_scalar(quant.MaxAsNumpy())
            tens.quantization.scale_f32 = self.len1_array_to_scalar(quant.ScaleAsNumpy())
            tens.quantization.zero_point = self.len1_array_to_scalar(quant.ZeroPointAsNumpy())

        # Representable range of the storage data type.
        if dtype == DataType.uint8:
            tens.quantization.quant_min = 0
            tens.quantization.quant_max = (1 << dtype.bits) - 1
        elif dtype in set((DataType.int8, DataType.int16, DataType.int32, DataType.int64)):
            tens.quantization.quant_min = -(1 << (dtype.bits - 1))
            tens.quantization.quant_max = (1 << (dtype.bits - 1)) - 1

        # No scale and no zero point means the tensor is unquantized.
        if tens.quantization.scale_f32 is None and tens.quantization.zero_point is None:
            tens.quantization = None

        tens.values = None
        buf = self.graph.buffers[tens_data.Buffer()]
        if buf is not None:
            # Constant tensor: reinterpret the raw byte buffer with the
            # tensor's element type and shape.
            tens.values = np.array(buf.view(datatype_map_numpy[tens_data.Type()]).reshape(shape))
            if tens.quantization is not None:
                # Keep the raw quantized data in quant_values; values holds
                # the dequantized (float) equivalent.
                tens.quant_values = tens.values
                tens.values = tens.quantization.dequantize(tens.quant_values)
        return tens

    def parse_operator(self, op_index, op_data):
        """Convert one flatbuffer Operator into a Vela Operation wired to its tensors."""
        op_type, opt_serializer = self.graph.operator_codes[op_data.OpcodeIndex()]
        inputs = [self.tensors[idx] for idx in op_data.InputsAsNumpy()]
        outputs = [self.tensors[idx] for idx in op_data.OutputsAsNumpy()]
        # Operations are named after their first output tensor when one exists.
        name = "unknown_op_name"
        if len(outputs):
            name = outputs[0].name
        op = Operation(op_type, name)
        op.op_index = op_index  # preserve the operator's position in the file
        op.inputs = inputs
        op.outputs = outputs
        for out in op.outputs:
            out.ops = [op]

        activation_function_to_split_out = None

        # Weight tensors are cloned and axis-permuted into the layout the
        # rest of the compiler expects (presumably HWIO/HWIM-style
        # reordering — TODO confirm against the consumers of these weights).
        if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
            inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))

        if op_type.startswith("FullyConnected"):
            inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0))

        # Deserialize the operator's builtin options into op.attrs.
        if opt_serializer is not None:
            op.attrs = opt_serializer.deserialize(op_data)

        if op_type == "Reshape" and "new_shape" not in op.attrs:
            # Reshape should have an attrib "new_shape" but if it is missing, add it based on the output shape
            op.attrs["new_shape"] = outputs[0].shape

        # Expand scalar attributes into the 4-element NHWC tuples used internally.
        if "stride_w" in op.attrs:
            op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
        if "filter_width" in op.attrs:
            op.attrs["ksize"] = (1, op.attrs["filter_height"], op.attrs["filter_width"], 1)
        if "dilation_w_factor" in op.attrs:
            op.attrs["dilation"] = (1, op.attrs["dilation_h_factor"], op.attrs["dilation_w_factor"], 1)
        if "depth_multiplier" in op.attrs:
            op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]

        if "fused_activation_function" in op.attrs:
            # Concat cannot keep a fused activation; remember it so it can be
            # split out into a separate op below.
            if op_type in set(("ConcatTFLite",)):
                act = op.attrs["fused_activation_function"]
                del op.attrs["fused_activation_function"]
                if act is not None:
                    activation_function_to_split_out = act

        if activation_function_to_split_out is not None:
            # Insert the activation as its own op between the original op and
            # its output: op -> intermediate_tens -> act_op -> out_tens.
            act_op = Operation(activation_function_to_split_out, name + activation_function_to_split_out)
            out_tens = op.outputs[0]
            intermediate_tens = out_tens.clone("_act_intermediate")
            out_tens.ops = [act_op]
            act_op.outputs = [out_tens]
            intermediate_tens.ops = [op]
            op.outputs[0] = intermediate_tens
            act_op.inputs = [intermediate_tens]

    @staticmethod
    def len1_array_to_scalar(arr):
        # The following flatbuffer quantisation fields all return a scalar value of 0 if they are not defined in
        # the input buffer. This is represented in Vela by using None.
        # Otherwise, the fields returned are a single or multi-element array. In which case, single element arrays
        # are converted to scalars
        if isinstance(arr, int) and arr == 0:
            return None
        if len(arr) == 1:
            return arr[0]
        return arr
208
Tim Hall79d07d22020-04-27 18:20:16 +0100209
class TFLiteGraph:
    """Top-level reader: parses a .tflite flatbuffer file into a Vela Graph (self.nng)."""

    def __init__(
        self, filename, batch_size=1, feed_dict=None, output_node_names=None, initialisation_nodes=None,
    ):
        """Read and parse the TensorFlow Lite file at filename.

        feed_dict and output_node_names are accepted for interface
        compatibility with other readers but are not used here.

        Fix: the original signature used mutable default arguments
        ({} and []), which are shared across calls; None is used instead
        and normalized below, so callers see identical behaviour.
        """
        if initialisation_nodes is None:
            initialisation_nodes = []

        self.op_times = {}
        if batch_size is None:
            batch_size = 1
        self.batch_size = batch_size
        # Network name is the input file's base name without extension.
        self.name = os.path.splitext(os.path.basename(filename))[0]
        self.initialisation_nodes = initialisation_nodes

        with open(filename, "rb") as f:
            buf = bytearray(f.read())

        model = Model.GetRootAsModel(buf, 0)

        # Buffers and operator codes are parsed first, since subgraph
        # tensors/operators reference them by index.
        self.buffers = []
        for idx in range(model.BuffersLength()):
            self.buffers.append(self.parse_buffer(model.Buffers(idx)))

        self.operator_codes = []
        for idx in range(model.OperatorCodesLength()):
            self.operator_codes.append(self.parse_operator_code(model.OperatorCodes(idx)))

        self.subgraphs = []
        for idx in range(model.SubgraphsLength()):
            self.subgraphs.append(TFLiteSubgraph(self, model.Subgraphs(idx)))

        # Build the Vela graph from the parsed subgraphs.
        self.nng = Graph(self.name, self.batch_size)
        for tflite_sg in self.subgraphs:
            sg = Subgraph(tflite_sg.name)
            sg.original_inputs = tflite_sg.inputs  # Preserve the original input order
            sg.output_tensors = tflite_sg.outputs
            self.nng.subgraphs.append(sg)

    def parse_buffer(self, buf_data):
        """Return a flatbuffer Buffer's data as a numpy array, or None when empty."""
        if buf_data.DataLength() == 0:
            return None
        data = buf_data.DataAsNumpy()
        return data

    def parse_operator_code(self, code):
        """Map a flatbuffer OperatorCode to an (op_type, option_serializer) pair.

        Raises InputFileError for operator codes Vela does not support.
        For CUSTOM operators the custom code string is appended to the op type.
        """
        c = code.BuiltinCode()
        if c not in builtin_operator_map:
            msg = "The input file contains operator code {} which is currently not supported".format(c)
            raise InputFileError(self.name, msg)
        op_type, ser = builtin_operator_map[c]
        if c == BuiltinOperator.CUSTOM:
            op_type += decode_str(code.CustomCode())
        return op_type, ser
261
262
def read_tflite(
    filename, batch_size=1, feed_dict=None, output_node_names=None, initialisation_nodes=None,
):
    """Read a TensorFlow Lite file and return the network graph (nng).

    Fix: the original signature used mutable default arguments ({} and []),
    which are shared across calls. None defaults are normalized to fresh
    containers here, so the TFLiteGraph constructor receives the same
    values as before.
    """
    feed_dict = {} if feed_dict is None else feed_dict
    output_node_names = [] if output_node_names is None else output_node_names
    initialisation_nodes = [] if initialisation_nodes is None else initialisation_nodes
    tflite_graph = TFLiteGraph(filename, batch_size, feed_dict, output_node_names, initialisation_nodes)
    nng = tflite_graph.nng
    nng.refresh_after_modification()
    return nng