# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Functions used to read from a TensorFlow Lite format file.
import os.path

import numpy as np

from .nn_graph import Graph
from .nn_graph import Subgraph
from .operation import Operation
from .tensor import QuantizationParameters
from .tensor import Tensor
from .tflite.BuiltinOperator import BuiltinOperator
from .tflite.Model import Model
from .tflite_mapping import builtin_operator_map
from .tflite_mapping import DataType
from .tflite_mapping import datatype_map
from .tflite_mapping import datatype_map_numpy


def decode_str(s):
    if s is None:
        return ""
    return s.decode("utf-8")


def reshape_tensor_add_const_op(tens, reorder):
    if not tens.reshaped:
        original_shape = tens.shape
        tens.name = tens.name + "_reshape"
        tens.shape = [original_shape[idx] for idx in reorder]
        tens.bandwidth_shape = tens.shape
        tens.storage_shape = tens.shape

        if tens.values is not None:
            tens.values = tens.values.transpose(reorder)

        if tens.quant_values is not None:
            tens.quant_values = tens.quant_values.transpose(reorder)

        op = Operation("Const", tens.name)
        op.outputs = [tens]
        tens.ops = [op]
        tens.reshaped = True
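
# Illustration of the reorder tuples used by parse_operator below (the TFLite weight layouts are
# stated as background, not taken from this file): Conv2D weights are stored as OHWI, e.g. shape
# [16, 3, 3, 8]; reorder (1, 2, 3, 0) rewrites that to HWIO [3, 3, 8, 16] and transposes the
# constant data to match. FullyConnected weights use (1, 0), i.e. a plain 2D transpose.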


class TFLiteSubgraph:
    def __init__(self, graph, subgraph):
        self.graph = graph
        self.name = decode_str(subgraph.Name())

        self.tensors = []
        for idx in range(subgraph.TensorsLength()):
            self.tensors.append(self.parse_tensor(subgraph.Tensors(idx)))

        for idx in range(subgraph.OperatorsLength()):
            self.parse_operator(subgraph.Operators(idx))

        self.outputs = [self.tensors[idx] for idx in subgraph.OutputsAsNumpy()]
        self.inputs = [self.tensors[idx] for idx in subgraph.InputsAsNumpy()]

        # Fix up tensors without operations. Generate either Placeholder or Constant ops
        for tens in self.inputs:
            assert not tens.ops
            op = Operation("Placeholder", tens.name)
            op.outputs = [tens]
            tens.ops = [op]

        for tens in self.tensors:
            if not tens.ops:
                op = Operation("Const", tens.name)
                op.outputs = [tens]
                tens.ops = [op]

    def parse_tensor(self, tens_data):
        np_shape = tens_data.ShapeAsNumpy()
        shape = list(np_shape) if type(np_shape) is np.ndarray else []
        name = decode_str(tens_data.Name())
        dtype = datatype_map[tens_data.Type()]
        tens = Tensor(shape, dtype, name)
        quant = tens_data.Quantization()

        tens.quantization = QuantizationParameters()
        if quant is not None:
            tens.quantization.min = self.len1_array_to_scalar(quant.MinAsNumpy())
            tens.quantization.max = self.len1_array_to_scalar(quant.MaxAsNumpy())
            tens.quantization.scale_f32 = self.len1_array_to_scalar(quant.ScaleAsNumpy())
            tens.quantization.zero_point = self.len1_array_to_scalar(quant.ZeroPointAsNumpy())

        if dtype == DataType.uint8:
            tens.quantization.quant_min = 0
            tens.quantization.quant_max = (1 << dtype.bits) - 1
        elif dtype in set((DataType.int8, DataType.int16, DataType.int32, DataType.int64)):
            tens.quantization.quant_min = -(1 << (dtype.bits - 1))
            tens.quantization.quant_max = (1 << (dtype.bits - 1)) - 1

        if tens.quantization.scale_f32 is None and tens.quantization.zero_point is None:
            tens.quantization = None

        tens.values = None
        buf = self.graph.buffers[tens_data.Buffer()]
        if buf is not None:
            tens.values = np.array(buf.view(datatype_map_numpy[tens_data.Type()]).reshape(shape))
            if tens.quantization is not None:
                tens.quant_values = tens.values
                tens.values = tens.quantization.dequantize(tens.quant_values)
        return tens
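
    # Note on the dequantisation above: TensorFlow Lite quantisation is the affine mapping
    # real_value = scale_f32 * (quantised_value - zero_point), so e.g. a scale of 0.5 and a zero
    # point of 128 map the uint8 value 130 to 1.0 (assuming QuantizationParameters.dequantize
    # implements this mapping).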

    def parse_operator(self, op_data):
        op_type, opt_serializer = self.graph.operator_codes[op_data.OpcodeIndex()]
        inputs = [self.tensors[idx] for idx in op_data.InputsAsNumpy()]
        outputs = [self.tensors[idx] for idx in op_data.OutputsAsNumpy()]
        name = "unknown_op_name"
        if len(outputs):
            name = outputs[0].name
        op = Operation(op_type, name)
        op.inputs = inputs
        op.outputs = outputs
        for out in op.outputs:
            out.ops = [op]

        activation_function_to_split_out = None

        if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
            reshape_tensor_add_const_op(inputs[1], (1, 2, 3, 0))

        if op_type.startswith("FullyConnected"):
            reshape_tensor_add_const_op(inputs[1], (1, 0))

        if opt_serializer is not None:
            op.attrs = opt_serializer.deserialize(op_data.BuiltinOptions(), op_data.CustomOptionsAsNumpy())

        if op_type.startswith("ResizeBilinear"):
            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
            out_shape = op.outputs[0].shape[1:3]
            if not op.attrs["align_corners"] and out_shape == upscaled_shape:
                # the output is supposed to be a 2x upscale,
                # so we need to do SAME padding
                op.attrs.update({"padding": b"SAME"})
            elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
                # here we can just run the avg pool without padding and
                # produce a (M * 2 - 1, N * 2 - 1) sized output
                op.attrs.update({"padding": b"VALID"})
            else:
                assert False, "Only 2x upscaling is supported"
            op.attrs.update({"filter_width": 2, "filter_height": 2, "stride_w": 1, "stride_h": 1})
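
        # Worked example of the two supported cases above: for a 16x16 input, align_corners=False
        # requires a 32x32 output (SAME padding), while align_corners=True requires a 31x31 output
        # (VALID padding); any other output shape hits the assert.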

        if "stride_w" in op.attrs:
            op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
        if "filter_width" in op.attrs:
            op.attrs["ksize"] = (1, op.attrs["filter_height"], op.attrs["filter_width"], 1)
        if "dilation_w_factor" in op.attrs:
            op.attrs["dilation"] = (1, op.attrs["dilation_h_factor"], op.attrs["dilation_w_factor"], 1)
        if "depth_multiplier" in op.attrs:
            op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]

        if "fused_activation_function" in op.attrs:
            if op_type in set(("ConcatTFLite",)):
                act = op.attrs["fused_activation_function"]
                del op.attrs["fused_activation_function"]
                if act is not None:
                    activation_function_to_split_out = act

        if activation_function_to_split_out is not None:
            act_op = Operation(activation_function_to_split_out, name + activation_function_to_split_out)
            out_tens = op.outputs[0]
            intermediate_tens = out_tens.clone("_act_intermediate")
            out_tens.ops = [act_op]
            act_op.outputs = [out_tens]
            intermediate_tens.ops = [op]
            op.outputs[0] = intermediate_tens
            act_op.inputs = [intermediate_tens]
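
        # Illustration of the rewrite above (the exact activation op names come from the options
        # deserialiser and are an assumption here): a ConcatTFLite with a fused "Relu" becomes
        #     ConcatTFLite -> cloned "_act_intermediate" tensor -> Relu -> original output tensor
        # so the activation runs as a standalone op instead of staying fused.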

    @staticmethod
    def len1_array_to_scalar(arr):
        # The following flatbuffer quantisation fields all return a scalar value of 0 if they are not defined in
        # the input buffer. This is represented in Vela by using None.
        # Otherwise, the fields return a single- or multi-element array, in which case single-element arrays
        # are converted to scalars.
        if isinstance(arr, int) and arr == 0:
            return None
        if len(arr) == 1:
            return arr[0]
        return arr
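
    # Illustrative behaviour (not exercised directly here): 0 -> None (field absent in the
    # flatbuffer), np.array([0.00392157]) -> 0.00392157 (per-tensor scale), and a multi-element
    # array such as np.array([0.1, 0.2]) is returned unchanged (per-channel quantisation).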


class TFLiteGraph:
    def __init__(
        self, filename, batch_size=1, feed_dict={}, output_node_names=[], initialisation_nodes=[],
    ):

        self.op_times = {}
        if batch_size is None:
            batch_size = 1
        self.batch_size = batch_size
        self.name = os.path.splitext(os.path.basename(filename))[0]
        self.initialisation_nodes = initialisation_nodes

        with open(filename, "rb") as f:
            buf = bytearray(f.read())

        model = Model.GetRootAsModel(buf, 0)

        self.buffers = []
        for idx in range(model.BuffersLength()):
            self.buffers.append(self.parse_buffer(model.Buffers(idx)))

        self.operator_codes = []
        for idx in range(model.OperatorCodesLength()):
            self.operator_codes.append(self.parse_operator_code(model.OperatorCodes(idx)))

        self.subgraphs = []
        for idx in range(model.SubgraphsLength()):
            self.subgraphs.append(TFLiteSubgraph(self, model.Subgraphs(idx)))

        self.nng = Graph(self.name, self.batch_size)
        for tflite_sg in self.subgraphs:
            sg = Subgraph(tflite_sg.name)
            sg.original_inputs = tflite_sg.inputs  # Preserve the original input order
            sg.output_tensors = tflite_sg.outputs
            self.nng.subgraphs.append(sg)

    def parse_buffer(self, buf_data):
        if buf_data.DataLength() == 0:
            return None
        data = buf_data.DataAsNumpy()
        return data

    def parse_operator_code(self, code):
        c = code.BuiltinCode()
        op_type, ser = builtin_operator_map[c]
        if c == BuiltinOperator.CUSTOM:
            op_type += decode_str(code.CustomCode())
        return op_type, ser
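
    # Example of the custom-operator handling above (the base op type for CUSTOM comes from
    # builtin_operator_map and is not shown here): the flatbuffer custom_code string, e.g. a
    # hypothetical "MyCustomOp", is appended so different custom operators stay distinguishable.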


def read_tflite(
    filename, batch_size=1, feed_dict={}, output_node_names=[], initialisation_nodes=[],
):
    tflite_graph = TFLiteGraph(filename, batch_size, feed_dict, output_node_names, initialisation_nodes)
    nng = tflite_graph.nng
    nng.refresh_after_modification()
    return nng
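

# Example usage (illustrative sketch; assumes this module is imported as part of the Vela package,
# e.g. "from ethosu.vela.tflite_reader import read_tflite"):
#
#     nng = read_tflite("model.tflite", batch_size=1)
#     for sg in nng.subgraphs:
#         print(sg.name, [tens.name for tens in sg.output_tensors])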