# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Description:
# Functions used to read from a TensorFlow Lite format file.
21from .tflite.Model import Model
22from .tflite.BuiltinOperator import BuiltinOperator
23
24import numpy as np
25import os.path
26from .nn_graph import Graph, Operation, Subgraph
27from .tensor import Tensor, QuantizationParameters
28
29from .tflite_mapping import builtin_operator_map, datatype_map, datatype_map_numpy, DataType
30
31
def decode_str(s):
    """Decode a flatbuffer string field to ``str``.

    Flatbuffer string accessors return UTF-8 encoded bytes, or None when
    the field is absent; both cases are normalised to a Python string.
    """
    return "" if s is None else s.decode("utf-8")
36
37
def reshape_tensor_add_const_op(tens, reorder):
    """Permute the axes of ``tens`` in place and give it a Const producer op.

    ``reorder`` is the axis permutation applied to the shape and to the
    (quantised and dequantised) value arrays. Idempotent: the
    ``tens.reshaped`` flag guards against permuting the same tensor twice
    when it feeds more than one operator.
    """
    if tens.reshaped:
        return

    permuted_shape = [tens.shape[axis] for axis in reorder]
    tens.name += "_reshape"
    tens.shape = permuted_shape
    tens.bandwidth_shape = permuted_shape
    tens.storage_shape = permuted_shape

    if tens.values is not None:
        tens.values = tens.values.transpose(reorder)
    if tens.quant_values is not None:
        tens.quant_values = tens.quant_values.transpose(reorder)

    # Re-root the tensor on a fresh Const op so the graph records it as
    # constant data rather than an unproduced tensor.
    const_op = Operation("Const", tens.name)
    const_op.outputs = [tens]
    tens.ops = [const_op]
    tens.reshaped = True
56
57
class TFLiteSubgraph:
    """One subgraph of a TFLite model, converted to Vela tensors and operations.

    Construction parses every tensor and operator out of the flatbuffer
    SubGraph table and then patches up producer-less tensors so that each
    tensor in the graph has at least one producing Operation.
    """

    def __init__(self, graph, subgraph):
        # graph: the owning TFLiteGraph (supplies parsed buffers and operator codes)
        # subgraph: the flatbuffer SubGraph table being deserialised
        self.graph = graph
        self.name = decode_str(subgraph.Name())

        self.tensors = []
        for idx in range(subgraph.TensorsLength()):
            self.tensors.append(self.parse_tensor(subgraph.Tensors(idx)))

        # Operators wire themselves into self.tensors as they are parsed
        for idx in range(subgraph.OperatorsLength()):
            self.parse_operator(subgraph.Operators(idx))

        # Subgraph inputs/outputs are stored as indices into the tensor table
        self.outputs = [self.tensors[idx] for idx in subgraph.OutputsAsNumpy()]
        self.inputs = [self.tensors[idx] for idx in subgraph.InputsAsNumpy()]

        # Fix up tensors without operations. Generate either Placeholder or Constant ops
        for tens in self.inputs:
            assert not tens.ops
            op = Operation("Placeholder", tens.name)
            op.outputs = [tens]
            tens.ops = [op]

        for tens in self.tensors:
            if not tens.ops:
                op = Operation("Const", tens.name)
                op.outputs = [tens]
                tens.ops = [op]

    def parse_tensor(self, tens_data):
        """Convert one flatbuffer Tensor table into a Vela Tensor.

        Reads shape, dtype, name, quantisation parameters and — when the
        tensor references a non-empty buffer — its constant values.
        Raises if the dtype is not one Vela can quantise.
        """
        np_shape = tens_data.ShapeAsNumpy()
        # ShapeAsNumpy() returns a scalar 0 for an absent shape vector; treat that as rank 0
        shape = list(np_shape) if type(np_shape) is np.ndarray else []
        name = decode_str(tens_data.Name())
        dtype = datatype_map[tens_data.Type()]

        tens = Tensor(shape, dtype, name)

        # NOTE(review): quant is used unconditionally below — assumes the
        # Quantization table is always present in the flatbuffer; confirm.
        quant = tens_data.Quantization()

        def len1_array_to_scalar(arr):
            # The following flatbuffer quantisation fields all return a scalar value of 0 if they are not definied in
            # the input buffer. This is represented in Vela by using None.
            # Otherwise, the fields returned are a single or multi-element array. In which case, single element arrays
            # are converted to scalars
            if isinstance(arr, int) and arr == 0:
                return None
            if len(arr) == 1:
                return arr[0]
            return arr

        tens.quantization = QuantizationParameters()
        tens.quantization.min = len1_array_to_scalar(quant.MinAsNumpy())
        tens.quantization.max = len1_array_to_scalar(quant.MaxAsNumpy())
        tens.quantization.scale_f32 = len1_array_to_scalar(quant.ScaleAsNumpy())
        tens.quantization.zero_point = len1_array_to_scalar(quant.ZeroPointAsNumpy())

        # Representable integer range for the dtype, used as the quantised value bounds
        if dtype == DataType.uint8:
            tens.quantization.quant_min = 0
            tens.quantization.quant_max = (1 << dtype.bits) - 1
        elif dtype in set((DataType.int8, DataType.int16, DataType.int32, DataType.int64)):
            tens.quantization.quant_min = -(1 << (dtype.bits - 1))
            tens.quantization.quant_max = (1 << (dtype.bits - 1)) - 1
        else:
            raise Exception("DataType '" + str(dtype) + "' is not supported for quantization.")

        # Neither scale nor zero point present => the tensor is unquantised
        if tens.quantization.scale_f32 is None and tens.quantization.zero_point is None:
            tens.quantization = None

        tens.values = None
        buf = self.graph.buffers[tens_data.Buffer()]
        if buf is not None:
            # Constant tensor: reinterpret the raw buffer bytes as the tensor's dtype
            tens.values = np.array(buf.view(datatype_map_numpy[tens_data.Type()]).reshape(shape))
            if tens.quantization is not None:
                # Keep the raw quantised values and store dequantised floats in values
                tens.quant_values = tens.values
                tens.values = tens.quantization.dequantize(tens.quant_values)
        return tens

    def parse_operator(self, op_data):
        """Convert one flatbuffer Operator table into a Vela Operation.

        Wires the operation to its input/output tensors, permutes weight
        tensors for conv/fully-connected ops into the layout Vela expects,
        deserialises builtin/custom options into op.attrs, and for concat
        splits a fused activation function out into a separate Operation.
        """
        op_type, opt_serializer = self.graph.operator_codes[op_data.OpcodeIndex()]
        inputs = [self.tensors[idx] for idx in op_data.InputsAsNumpy()]
        outputs = [self.tensors[idx] for idx in op_data.OutputsAsNumpy()]
        # Operations are named after their first output tensor when one exists
        name = "unknown_op_name"
        if len(outputs):
            name = outputs[0].name
        op = Operation(op_type, name)
        op.inputs = inputs
        op.outputs = outputs
        for out in op.outputs:
            out.ops = [op]

        activation_function_to_split_out = None

        # inputs[1] is the weight tensor for these op types; permute its axes
        # (see reshape_tensor_add_const_op — idempotent across shared weights)
        if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
            reshape_tensor_add_const_op(inputs[1], (1, 2, 3, 0))

        if op_type.startswith("FullyConnected"):
            reshape_tensor_add_const_op(inputs[1], (1, 0))

        if opt_serializer is not None:
            op.attrs = opt_serializer.deserialize(op_data.BuiltinOptions(), op_data.CustomOptionsAsNumpy())

            # Normalise TFLite scalar attributes into TF-style NHWC 4-tuples
            if "stride_w" in op.attrs:
                op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
            if "filter_width" in op.attrs:
                op.attrs["ksize"] = (1, op.attrs["filter_height"], op.attrs["filter_width"], 1)
            if "dilation_w_factor" in op.attrs:
                op.attrs["dilation"] = (1, op.attrs["dilation_h_factor"], op.attrs["dilation_w_factor"], 1)
            if "depth_multiplier" in op.attrs:
                op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]

            if "fused_activation_function" in op.attrs:
                # Only concat has its fused activation split into its own op here
                if op_type in set(("ConcatTFLite",)):
                    act = op.attrs["fused_activation_function"]
                    del op.attrs["fused_activation_function"]
                    if act is not None:
                        activation_function_to_split_out = act

        if activation_function_to_split_out is not None:
            # Insert an intermediate tensor: op -> intermediate -> act_op -> original output
            act_op = Operation(activation_function_to_split_out, name + activation_function_to_split_out)
            out_tens = op.outputs[0]
            intermediate_tens = out_tens.clone("_act_intermediate")
            out_tens.ops = [act_op]
            act_op.outputs = [out_tens]
            intermediate_tens.ops = [op]
            op.outputs[0] = intermediate_tens
            act_op.inputs = [intermediate_tens]
183
184
class TFLiteGraph:
    """Top-level reader: parses a .tflite flatbuffer file into a Vela Graph.

    After construction, the converted network is available as ``self.nng``;
    the raw buffers, operator codes and per-subgraph readers are kept on the
    instance for the subgraph parsers to reference.
    """

    def __init__(
        self,
        filename,
        batch_size=1,
        feed_dict=None,
        output_node_names=None,
        initialisation_nodes=None,
    ):
        """Read and convert the TFLite model stored in ``filename``.

        Args:
            filename: path to the .tflite file.
            batch_size: batch size recorded on the resulting Graph; None means 1.
            feed_dict: accepted for interface parity with other readers; unused here.
            output_node_names: accepted for interface parity with other readers; unused here.
            initialisation_nodes: stored on the instance; defaults to a fresh empty list.
        """
        self.op_times = {}
        if batch_size is None:
            batch_size = 1
        self.batch_size = batch_size
        self.name = os.path.splitext(os.path.basename(filename))[0]
        # Bug fix: the defaults used to be shared mutable objects ({} / []),
        # so every instance aliased the same list; use None sentinels instead.
        self.initialisation_nodes = [] if initialisation_nodes is None else initialisation_nodes

        with open(filename, "rb") as f:
            buf = bytearray(f.read())

        model = Model.GetRootAsModel(buf, 0)

        # Buffers must be parsed first: tensors reference them by index.
        self.buffers = []
        for idx in range(model.BuffersLength()):
            self.buffers.append(self.parse_buffer(model.Buffers(idx)))

        self.operator_codes = []
        for idx in range(model.OperatorCodesLength()):
            self.operator_codes.append(self.parse_operator_code(model.OperatorCodes(idx)))

        self.subgraphs = []
        for idx in range(model.SubgraphsLength()):
            self.subgraphs.append(TFLiteSubgraph(self, model.Subgraphs(idx)))

        # Wrap the parsed subgraphs in Vela's internal Graph representation
        self.nng = Graph(self.name, self.batch_size)
        for tflite_sg in self.subgraphs:
            sg = Subgraph(tflite_sg.name)
            sg.original_inputs = tflite_sg.inputs  # Preserve the original input order
            sg.output_tensors = tflite_sg.outputs
            self.nng.subgraphs.append(sg)

    def parse_buffer(self, buf_data):
        """Return the buffer's contents, or None for the empty sentinel buffer.

        TFLite reserves buffer 0 (zero-length) for tensors with no constant
        data; returning None lets parse_tensor distinguish that case.
        """
        if buf_data.DataLength() == 0:
            return None
        data = buf_data.DataAsNumpy()
        return data

    def parse_operator_code(self, code):
        """Map a flatbuffer OperatorCode to an (op_type, option_serializer) pair.

        Custom operators get their custom code string appended to the base
        op type name so different custom ops remain distinguishable.
        """
        c = code.BuiltinCode()
        op_type, ser = builtin_operator_map[c]
        if c == BuiltinOperator.CUSTOM:
            op_type += decode_str(code.CustomCode())
        return op_type, ser
238
239
def read_tflite(
    filename,
    batch_size=1,
    feed_dict=None,
    output_node_names=None,
    initialisation_nodes=None,
):
    """Read a TFLite file and return the converted Vela Graph (nng).

    Args:
        filename: path to the .tflite file.
        batch_size: batch size for the resulting Graph; None means 1.
        feed_dict: accepted for interface parity with other readers; unused downstream.
        output_node_names: accepted for interface parity; unused downstream.
        initialisation_nodes: forwarded to TFLiteGraph; defaults to a fresh empty list.

    Returns:
        The Graph object with connectivity refreshed after parsing.
    """
    # Bug fix: defaults used to be shared mutable objects ({} / []); use None
    # sentinels and materialise fresh containers per call before forwarding.
    tflite_graph = TFLiteGraph(
        filename,
        batch_size,
        {} if feed_dict is None else feed_dict,
        [] if output_node_names is None else output_node_names,
        [] if initialisation_nodes is None else initialisation_nodes,
    )
    nng = tflite_graph.nng
    nng.refresh_after_modification()
    return nng