blob: 850690f23e7467aac52f6aa6b9ff7eded760d366 [file] [log] [blame]
Tim Hall79d07d22020-04-27 18:20:16 +01001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Functions used to read from a TensorFlow Lite format file.
Diego Russoea6111a2020-04-14 18:41:58 +010018import os.path
Tim Hall79d07d22020-04-27 18:20:16 +010019
20import numpy as np
Tim Hall79d07d22020-04-27 18:20:16 +010021
Louis Verhaard7db78962020-05-25 15:05:26 +020022from .errors import UnsupportedFeatureError
Diego Russoe8a10452020-04-21 17:39:10 +010023from .nn_graph import Graph
24from .nn_graph import Subgraph
Diego Russoea6111a2020-04-14 18:41:58 +010025from .operation import Operation
Diego Russoe8a10452020-04-21 17:39:10 +010026from .tensor import QuantizationParameters
27from .tensor import Tensor
28from .tflite.BuiltinOperator import BuiltinOperator
29from .tflite.Model import Model
30from .tflite_mapping import builtin_operator_map
31from .tflite_mapping import DataType
32from .tflite_mapping import datatype_map
33from .tflite_mapping import datatype_map_numpy
Tim Hall79d07d22020-04-27 18:20:16 +010034
35
def decode_str(s):
    """Decode a flatbuffer byte string as UTF-8; None maps to the empty string."""
    return "" if s is None else s.decode("utf-8")
40
41
def reshape_tensor_add_const_op(tens, reorder):
    """Permute a constant tensor's shape (and any stored values) by *reorder*,
    then attach a fresh Const op as its sole producer.

    Idempotent: a tensor already marked as reshaped is left untouched.
    """
    if tens.reshaped:
        return

    old_shape = tens.shape
    tens.name += "_reshape"
    tens.shape = [old_shape[i] for i in reorder]
    tens.bandwidth_shape = tens.shape
    tens.storage_shape = tens.shape

    # Transpose both the dequantized and raw quantized value arrays, if present
    for attr in ("values", "quant_values"):
        arr = getattr(tens, attr)
        if arr is not None:
            setattr(tens, attr, arr.transpose(reorder))

    const_op = Operation("Const", tens.name)
    const_op.outputs = [tens]
    tens.ops = [const_op]
    tens.reshaped = True
60
61
class TFLiteSubgraph:
    """One subgraph parsed out of a TFLite flatbuffer model.

    Builds the tensor list, converts each serialized operator into an
    Operation, and records the subgraph's input/output tensors. The parent
    TFLiteGraph is kept in self.graph for access to shared buffers and
    operator codes.
    """

    def __init__(self, graph, subgraph):
        self.graph = graph
        self.name = decode_str(subgraph.Name())

        # Tensors must be parsed first: parse_operator indexes into self.tensors
        self.tensors = []
        for idx in range(subgraph.TensorsLength()):
            self.tensors.append(self.parse_tensor(subgraph.Tensors(idx)))

        for idx in range(subgraph.OperatorsLength()):
            self.parse_operator(subgraph.Operators(idx))

        self.outputs = [self.tensors[idx] for idx in subgraph.OutputsAsNumpy()]
        self.inputs = [self.tensors[idx] for idx in subgraph.InputsAsNumpy()]

        # Fix up tensors without operations. Generate either Placeholder or Constant ops
        for tens in self.inputs:
            assert not tens.ops
            op = Operation("Placeholder", tens.name)
            op.outputs = [tens]
            tens.ops = [op]

        # Any remaining producer-less tensor is treated as constant data
        for tens in self.tensors:
            if not tens.ops:
                op = Operation("Const", tens.name)
                op.outputs = [tens]
                tens.ops = [op]

    def parse_tensor(self, tens_data):
        """Convert one flatbuffer tensor into a Vela Tensor.

        Populates shape, dtype, quantization parameters and — when the tensor
        references a non-empty model buffer — its constant values (both raw
        quantized and dequantized forms).
        """
        np_shape = tens_data.ShapeAsNumpy()
        # A scalar/unranked tensor yields a non-ndarray sentinel; map it to []
        shape = list(np_shape) if type(np_shape) is np.ndarray else []
        name = decode_str(tens_data.Name())
        dtype = datatype_map[tens_data.Type()]
        tens = Tensor(shape, dtype, name)
        quant = tens_data.Quantization()

        tens.quantization = QuantizationParameters()
        if quant is not None:
            tens.quantization.min = self.len1_array_to_scalar(quant.MinAsNumpy())
            tens.quantization.max = self.len1_array_to_scalar(quant.MaxAsNumpy())
            tens.quantization.scale_f32 = self.len1_array_to_scalar(quant.ScaleAsNumpy())
            tens.quantization.zero_point = self.len1_array_to_scalar(quant.ZeroPointAsNumpy())

        # Representable integer range for the tensor's data type
        if dtype == DataType.uint8:
            tens.quantization.quant_min = 0
            tens.quantization.quant_max = (1 << dtype.bits) - 1
        elif dtype in set((DataType.int8, DataType.int16, DataType.int32, DataType.int64)):
            tens.quantization.quant_min = -(1 << (dtype.bits - 1))
            tens.quantization.quant_max = (1 << (dtype.bits - 1)) - 1

        # No scale and no zero point means the tensor is effectively unquantized
        if tens.quantization.scale_f32 is None and tens.quantization.zero_point is None:
            tens.quantization = None

        tens.values = None
        buf = self.graph.buffers[tens_data.Buffer()]
        if buf is not None:
            # Reinterpret the raw byte buffer as the tensor's numpy dtype
            tens.values = np.array(buf.view(datatype_map_numpy[tens_data.Type()]).reshape(shape))
            if tens.quantization is not None:
                # Keep the raw integer values and store dequantized floats in .values
                tens.quant_values = tens.values
                tens.values = tens.quantization.dequantize(tens.quant_values)
        return tens

    def parse_operator(self, op_data):
        """Convert one flatbuffer operator into a Vela Operation.

        Wires input/output tensors, deserializes builtin/custom options into
        op.attrs, normalises attribute names (strides/ksize/dilation), rewrites
        ResizeBilinear as a 2x2 average-pool configuration, and splits fused
        activation functions out of concat ops into separate Operations.
        """
        op_type, opt_serializer = self.graph.operator_codes[op_data.OpcodeIndex()]
        inputs = [self.tensors[idx] for idx in op_data.InputsAsNumpy()]
        outputs = [self.tensors[idx] for idx in op_data.OutputsAsNumpy()]
        name = "unknown_op_name"
        if len(outputs):
            name = outputs[0].name
        op = Operation(op_type, name)
        op.inputs = inputs
        op.outputs = outputs
        for out in op.outputs:
            out.ops = [op]

        activation_function_to_split_out = None

        # Weight tensors are stored in TFLite layout; transpose them into the
        # layout Vela expects for conv-style and fully-connected ops
        if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
            reshape_tensor_add_const_op(inputs[1], (1, 2, 3, 0))

        if op_type.startswith("FullyConnected"):
            reshape_tensor_add_const_op(inputs[1], (1, 0))

        if opt_serializer is not None:
            op.attrs = opt_serializer.deserialize(op_data.BuiltinOptions(), op_data.CustomOptionsAsNumpy())

        if op_type.startswith("ResizeBilinear"):
            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
            out_shape = op.outputs[0].shape[1:3]
            if not op.attrs["align_corners"] and out_shape == upscaled_shape:
                # this means the output is supposed to be a x2 upscale,
                # so we need to do SAME padding
                op.attrs.update({"padding": b"SAME"})
            elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
                # here we can just run the avg pool without padding and
                # produce a (M * 2 - 1, N * 2 - 1) sized output
                op.attrs.update({"padding": b"VALID"})
            else:
                raise UnsupportedFeatureError("ResizeBilinear: Only 2x upscaling is supported")
            # Model the bilinear resize as a 2x2 average pool with stride 1
            op.attrs.update({"filter_width": 2, "filter_height": 2, "stride_w": 1, "stride_h": 1})

        # Expand scalar h/w attributes into NHWC-ordered 4-tuples
        if "stride_w" in op.attrs:
            op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
        if "filter_width" in op.attrs:
            op.attrs["ksize"] = (1, op.attrs["filter_height"], op.attrs["filter_width"], 1)
        if "dilation_w_factor" in op.attrs:
            op.attrs["dilation"] = (1, op.attrs["dilation_h_factor"], op.attrs["dilation_w_factor"], 1)
        if "depth_multiplier" in op.attrs:
            op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]

        # Only concat has its fused activation split into a standalone op here
        if "fused_activation_function" in op.attrs:
            if op_type in set(("ConcatTFLite",)):
                act = op.attrs["fused_activation_function"]
                del op.attrs["fused_activation_function"]
                if act is not None:
                    activation_function_to_split_out = act

        if activation_function_to_split_out is not None:
            # Insert: op -> intermediate_tens -> act_op -> original output tensor
            act_op = Operation(activation_function_to_split_out, name + activation_function_to_split_out)
            out_tens = op.outputs[0]
            intermediate_tens = out_tens.clone("_act_intermediate")
            out_tens.ops = [act_op]
            act_op.outputs = [out_tens]
            intermediate_tens.ops = [op]
            op.outputs[0] = intermediate_tens
            act_op.inputs = [intermediate_tens]

    @staticmethod
    def len1_array_to_scalar(arr):
        # The following flatbuffer quantisation fields all return a scalar value of 0 if they are not defined in
        # the input buffer. This is represented in Vela by using None.
        # Otherwise, the fields returned are a single or multi-element array. In which case, single element arrays
        # are converted to scalars
        if isinstance(arr, int) and arr == 0:
            return None
        if len(arr) == 1:
            return arr[0]
        return arr
200
Tim Hall79d07d22020-04-27 18:20:16 +0100201
class TFLiteGraph:
    """Parse a TensorFlow Lite flatbuffer file into Vela's internal representation.

    After construction, self.nng holds a Graph containing one Subgraph per
    TFLite subgraph, with original input order and output tensors preserved.
    """

    def __init__(
        self, filename, batch_size=1, feed_dict=None, output_node_names=None, initialisation_nodes=None,
    ):
        # feed_dict and output_node_names are accepted for signature parity
        # with the other graph readers but are not used by the TFLite path.
        # Defaults are None rather than mutable literals ({} / []) so that
        # instances never share accidentally-mutated default objects.
        self.op_times = {}
        if batch_size is None:
            batch_size = 1
        self.batch_size = batch_size
        self.name = os.path.splitext(os.path.basename(filename))[0]
        self.initialisation_nodes = [] if initialisation_nodes is None else initialisation_nodes

        with open(filename, "rb") as f:
            buf = bytearray(f.read())

        model = Model.GetRootAsModel(buf, 0)

        # Buffers and operator codes are model-global; parse them before the
        # subgraphs, which index into both.
        self.buffers = []
        for idx in range(model.BuffersLength()):
            self.buffers.append(self.parse_buffer(model.Buffers(idx)))

        self.operator_codes = []
        for idx in range(model.OperatorCodesLength()):
            self.operator_codes.append(self.parse_operator_code(model.OperatorCodes(idx)))

        self.subgraphs = []
        for idx in range(model.SubgraphsLength()):
            self.subgraphs.append(TFLiteSubgraph(self, model.Subgraphs(idx)))

        self.nng = Graph(self.name, self.batch_size)
        for tflite_sg in self.subgraphs:
            sg = Subgraph(tflite_sg.name)
            sg.original_inputs = tflite_sg.inputs  # Preserve the original input order
            sg.output_tensors = tflite_sg.outputs
            self.nng.subgraphs.append(sg)

    def parse_buffer(self, buf_data):
        """Return a model buffer's contents as a numpy array, or None when empty."""
        if buf_data.DataLength() == 0:
            return None
        data = buf_data.DataAsNumpy()
        return data

    def parse_operator_code(self, code):
        """Map a flatbuffer operator code to an (op_type, option_serializer) pair.

        Custom ops get their custom-code string appended to the base op type.
        """
        c = code.BuiltinCode()
        op_type, ser = builtin_operator_map[c]
        if c == BuiltinOperator.CUSTOM:
            op_type += decode_str(code.CustomCode())
        return op_type, ser
250
251
def read_tflite(
    filename, batch_size=1, feed_dict=None, output_node_names=None, initialisation_nodes=None,
):
    """Read a TFLite file and return Vela's internal Graph (nng) for it.

    feed_dict, output_node_names and initialisation_nodes mirror the other
    readers' signatures; defaults are None instead of mutable literals so
    repeated calls cannot share (and accidentally mutate) the same objects.
    """
    if feed_dict is None:
        feed_dict = {}
    if output_node_names is None:
        output_node_names = []
    if initialisation_nodes is None:
        initialisation_nodes = []
    tflite_graph = TFLiteGraph(filename, batch_size, feed_dict, output_node_names, initialisation_nodes)
    nng = tflite_graph.nng
    nng.refresh_after_modification()
    return nng