# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Functions used to write to a TensorFlow Lite format file. Supports adding in file identifiers.
Tim Hall79d07d22020-04-27 18:20:16 +010018import flatbuffers
Diego Russoe8a10452020-04-21 17:39:10 +010019import flatbuffers.number_types as N
20import numpy as np
21from flatbuffers import encode
Diego Russoea6111a2020-04-14 18:41:58 +010022from flatbuffers.builder import UOffsetTFlags
23
Diego Russoe8a10452020-04-21 17:39:10 +010024from .nn_graph import PassPlacement
25from .tensor import MemArea
26from .tensor import TensorPurpose
Tim Hall79d07d22020-04-27 18:20:16 +010027from .tflite import Buffer
28from .tflite import Metadata
Diego Russoe8a10452020-04-21 17:39:10 +010029from .tflite import Model
30from .tflite import Operator
31from .tflite import OperatorCode
32from .tflite import QuantizationParameters
33from .tflite import SubGraph
34from .tflite import Tensor
35from .tflite_mapping import builtin_operator_inv_map
36from .tflite_mapping import BuiltinOperator
37from .tflite_mapping import custom_prefix
38from .tflite_mapping import datatype_inv_map
39
40# ugh, the python flatbuffer interface is missing a method to add in file identifier. patching it in here:
Tim Hall79d07d22020-04-27 18:20:16 +010041
# Version of the TensorFlow Lite schema that this writer emits.
tflite_version = 3
# Four-character flatbuffer file identifier placed in the file header ("TFL3").
tflite_file_identifier = "TFL{}".format(tflite_version)
44
45
def FinishWithFileIdentifier(self, rootTable, fid):
    # Finish the flatbuffer and prepend a 4-character file identifier.
    # The upstream python flatbuffers Builder of this vintage lacks this
    # method, so it is monkey-patched onto flatbuffers.Builder below.
    #
    # rootTable: offset of the root table to finish the buffer with.
    # fid: exactly 4 characters, written immediately before the root offset.
    if fid is None or len(fid) != 4:
        raise Exception("fid must be 4 chars")

    flags = N.Uint8Flags
    prepSize = 4
    # Reserve space for the 4-byte root offset plus the 4 identifier bytes.
    self.Prep(self.minalign, prepSize + len(fid))
    # Builder writes back-to-front, so emit the identifier characters in
    # reverse so they appear in order in the finished buffer.
    for i in range(3, -1, -1):
        self.head = self.head - flags.bytewidth
        encode.Write(flags.packer_type, self.Bytes, self.Head(), ord(fid[i]))

    return self.Finish(rootTable)


# Patch the method onto the Builder class so all builders gain it.
flatbuffers.Builder.FinishWithFileIdentifier = FinishWithFileIdentifier
61
62
def make_vector(v):
    """Return v unchanged if it has a length, otherwise wrap the scalar in a list."""
    try:
        len(v)
    except TypeError:
        # Scalars (ints, floats, numpy scalars) have no len() - wrap them.
        return [v]
    return v
69
70
class TFLiteSerialiser:
    """Serialises an internal neural-network graph (nng) into a TensorFlow
    Lite flatbuffer.

    Only subgraphs placed on the CPU are written. Tensors, buffers,
    operators and an "OfflineMemoryAllocation" metadata blob are emitted
    through the generated ``.tflite`` flatbuffer modules.
    """

    def __init__(self, nng):
        self.builder = flatbuffers.Builder(0)
        self.nng = nng

        self.scratch_buf_id = 0  # Always assign scratch to buffer 0
        self.buffer_offsets_map = {}
        self.buffers_to_write = []  # have an empty array there

        self.input_tensors = []
        # Graph-construction helper ops that must not be written as TFLite operators.
        self.ops_to_ignore = set(("Const", "Placeholder", "SubgraphInput"))

        # Maps a weight tensor to the axis permutation needed to restore the
        # layout TFLite expects when the tensor is serialised.
        self.tensors_to_reshape = {}

        self.subgraphs_to_write = [sg for sg in self.nng.subgraphs if sg.placement == PassPlacement.Cpu]

        all_ops = []
        for sg in self.subgraphs_to_write:
            for ps in sg.passes:
                for op in ps.ops:
                    if op.type not in self.ops_to_ignore:
                        all_ops.append(op)
                        if op.type.startswith("Conv2D") or op.type.startswith("DepthwiseConv2d"):
                            self.tensors_to_reshape[op.inputs[1]] = (3, 0, 1, 2)
                        if op.type.startswith("FullyConnected"):
                            self.tensors_to_reshape[op.inputs[1]] = (1, 0)

        # Sorted so the operator-code table has a deterministic order.
        self.operator_codes = list(sorted(set(op.type for op in all_ops)))
        self.operator_code_map = {}

    def write_byte_vector(self, v, alignment=1):
        """Write v as a flatbuffer vector of bytes; returns its offset."""
        builder = self.builder
        builder.StartVector(1, len(v), alignment)
        for e in v[::-1]:  # flatbuffers vectors are built back-to-front
            builder.PrependByte(e)
        return builder.EndVector(len(v))

    def write_int_vector(self, v):
        """Write v as a flatbuffer vector of int32; returns its offset."""
        builder = self.builder
        builder.StartVector(4, len(v), 4)
        for e in v[::-1]:
            builder.PrependInt32(e)
        return builder.EndVector(len(v))

    def write_long_vector(self, v):
        """Write v as a flatbuffer vector of int64; returns its offset."""
        builder = self.builder
        builder.StartVector(8, len(v), 8)
        for e in v[::-1]:
            builder.PrependInt64(e)
        return builder.EndVector(len(v))

    def write_float_vector(self, v):
        """Write v as a flatbuffer vector of float32; returns its offset."""
        builder = self.builder
        builder.StartVector(4, len(v), 4)
        for e in v[::-1]:
            builder.PrependFloat32(e)
        return builder.EndVector(len(v))

    def write_offset_vector(self, v):
        """Write v (a list of table offsets) as a flatbuffer vector of offsets."""
        builder = self.builder
        builder.StartVector(4, len(v), 4)
        for e in v[::-1]:
            builder.PrependUOffsetTRelative(e)
        return builder.EndVector(len(v))

    def assign_buffers_to_tensors(self, tensors):
        """Map each tensor to a buffer index.

        All tensors in the scratch memory area share buffer 0
        (self.scratch_buf_id); every other tensor gets its own buffer so
        empty buffers stay unique (required by TensorFlow Lite Micro).
        Returns a dict of tensor -> buffer index.
        """
        scratch_tensors = [tens for tens in tensors if tens.purpose == TensorPurpose.Scratch]
        if len(scratch_tensors) > 0:
            scratch_tensor_mem_area = scratch_tensors[0].mem_area
        else:
            scratch_tensor_mem_area = None  # all tensors are initialised to MemArea.Unknown

        buffer_map = {}
        buf_idx = 1  # buffer 0 is reserved for scratch

        for tens in tensors:
            if tens.mem_area == scratch_tensor_mem_area:
                buffer_map[tens] = self.scratch_buf_id
            else:
                buffer_map[tens] = buf_idx
                buf_idx += 1

        # Initialize buffers_to_write to a length equal to the number of
        # buffers so they can be appended at the correct index during
        # tensor serialization
        self.buffers_to_write = [None] * buf_idx

        return buffer_map

    def serialise_operator_code(self, idx, code):
        """Serialise one OperatorCode table for operator type `code`.

        Also records (idx, builtin code, option serializer) in
        self.operator_code_map for later use by serialise_operator.
        Returns the OperatorCode table offset.
        """
        builder = self.builder
        custom_code_offset = None
        if code.startswith(custom_prefix):
            tf_code, opt_serializer = builtin_operator_inv_map[custom_prefix]
            custom_code_offset = builder.CreateString(code[len(custom_prefix) :])
        else:
            try:
                tf_code, opt_serializer = builtin_operator_inv_map[code]
            except KeyError:
                # Fix: original message lacked a space between the two literals.
                print(
                    "Warning: Writing operation %s, which does not have a direct TensorFlow Lite mapping, "
                    "as a custom operation" % (code,)
                )
                tf_code, opt_serializer = builtin_operator_inv_map[custom_prefix]

        if tf_code == BuiltinOperator.CUSTOM:
            assert code == "NpuOp"  # Currently only support serialising NPU operators as a custom op
            custom_code_offset = builder.CreateString("ethos-u")

        self.operator_code_map[code] = (idx, tf_code, opt_serializer)

        OperatorCode.OperatorCodeStart(builder)
        OperatorCode.OperatorCodeAddBuiltinCode(builder, tf_code)
        if custom_code_offset is not None:
            OperatorCode.OperatorCodeAddCustomCode(builder, custom_code_offset)

        return OperatorCode.OperatorCodeEnd(builder)

    def serialise_quantization_parameters(self, quant):
        """Serialise a QuantizationParameters table; quant may be None.

        Returns the table offset (an empty table if quant is None).
        """
        builder = self.builder

        # Renamed from min/max to avoid shadowing the builtins.
        min_offset = None
        max_offset = None
        scale_offset = None
        zero_point_offset = None
        if quant is not None:
            if quant.min is not None:
                min_offset = self.write_float_vector(make_vector(quant.min))
            if quant.max is not None:
                max_offset = self.write_float_vector(make_vector(quant.max))
            if quant.scale_f32 is not None:
                scale_offset = self.write_float_vector(make_vector(quant.scale_f32))
            if quant.zero_point is not None:
                zero_point_offset = self.write_long_vector(make_vector(quant.zero_point))

        QuantizationParameters.QuantizationParametersStart(builder)
        if min_offset is not None:
            QuantizationParameters.QuantizationParametersAddMin(builder, min_offset)
        if max_offset is not None:
            QuantizationParameters.QuantizationParametersAddMax(builder, max_offset)
        if scale_offset is not None:
            QuantizationParameters.QuantizationParametersAddScale(builder, scale_offset)
        if zero_point_offset is not None:
            QuantizationParameters.QuantizationParametersAddZeroPoint(builder, zero_point_offset)
        return QuantizationParameters.QuantizationParametersEnd(builder)

    def serialise_tensor(self, tens):
        """Serialise one Tensor table and stage its backing data in
        self.buffers_to_write. Returns the Tensor table offset."""
        builder = self.builder
        tens_shape = tens.shape
        values = tens.quant_values
        if values is None:
            values = tens.values

        if values is None:
            values = np.empty(shape=(0), dtype=np.uint8)

        if tens in self.tensors_to_reshape:
            # Restore the TFLite weight layout recorded in __init__.
            reorder = self.tensors_to_reshape[tens]
            tens_shape = [tens_shape[idx] for idx in reorder]
            values = values.transpose(reorder)

        if tens.purpose == TensorPurpose.Scratch:
            # Scratch is exported with an empty shape; its data goes to buffer 0.
            tens_shape = [0]
            self.buffers_to_write[self.scratch_buf_id] = values.flatten().view(np.uint8)

        buf_id = self.buffer_map[tens]
        if buf_id != self.scratch_buf_id:
            self.buffers_to_write[buf_id] = values.flatten().view(np.uint8)

        shape = self.write_int_vector(tens_shape)

        name = builder.CreateString(tens.name)
        quant = self.serialise_quantization_parameters(tens.quantization)

        Tensor.TensorStart(builder)
        Tensor.TensorAddShape(builder, shape)
        Tensor.TensorAddType(builder, datatype_inv_map[tens.dtype])
        # All tensors must have a valid backing buffer, even if it is empty.
        # Empty buffers should be kept unique for TensorFlow Lite Micro
        Tensor.TensorAddBuffer(builder, buf_id)
        Tensor.TensorAddName(builder, name)
        Tensor.TensorAddQuantization(builder, quant)

        res = Tensor.TensorEnd(builder)
        return res

    def serialise_operator(self, op):
        """Serialise one Operator table, including its builtin or custom
        options. Returns the Operator table offset."""
        builder = self.builder

        inputs_offset = self.write_int_vector([self.tensor_map[tens] for tens in op.inputs])
        outputs_offset = self.write_int_vector([self.tensor_map[tens] for tens in op.outputs])

        op_idx, tflop, opt_serializer = self.operator_code_map[op.type]

        builtin_opt_offset = None
        custom_opt_offset = None
        if opt_serializer is not None:
            attrs = dict(op.attrs)
            # Flatten internal attribute lists into the scalar fields the
            # TFLite options tables expect.
            if "strides" in attrs:
                attrs["stride_h"] = attrs["strides"][1]
                attrs["stride_w"] = attrs["strides"][2]
            if "ksize" in attrs:
                attrs["filter_height"] = attrs["ksize"][1]
                attrs["filter_width"] = attrs["ksize"][2]
            if "dilation" in attrs:
                attrs["dilation_h_factor"] = attrs["dilation"][1]
                attrs["dilation_w_factor"] = attrs["dilation"][2]
            if "channel_multiplier" in attrs:
                attrs["depth_multiplier"] = attrs["channel_multiplier"]

            builtin_opt_offset, custom_opt_offset = opt_serializer.serialize(builder, attrs)

        mutating_variable_inputs_offset = self.write_byte_vector([])
        Operator.OperatorStart(builder)
        Operator.OperatorAddOpcodeIndex(builder, op_idx)
        Operator.OperatorAddInputs(builder, inputs_offset)
        Operator.OperatorAddOutputs(builder, outputs_offset)

        if builtin_opt_offset is not None:
            Operator.OperatorAddBuiltinOptionsType(builder, opt_serializer.builtin_opt_type)
            Operator.OperatorAddBuiltinOptions(builder, builtin_opt_offset)
        if custom_opt_offset is not None:
            Operator.OperatorAddCustomOptions(builder, custom_opt_offset)
            Operator.OperatorAddCustomOptionsFormat(builder, opt_serializer.custom_opt_format)

        Operator.OperatorAddMutatingVariableInputs(builder, mutating_variable_inputs_offset)
        return Operator.OperatorEnd(builder)

    def serialise_subgraph(self, sg):
        """Serialise one SubGraph table: tensors, inputs, outputs, operators.

        Side effects: sets self.tensor_map and self.buffer_map for use by
        serialise_tensor/serialise_operator. Returns the SubGraph offset.
        """
        builder = self.builder
        tensor_set = set()

        all_ops = []
        for ps in sg.passes:
            for op in ps.ops:
                if op.type not in self.ops_to_ignore:
                    all_ops.append(op)

        for op in all_ops:
            for tens in op.inputs + op.outputs:
                tensor_set.add(tens)

        # Sort by (name, discovery index) for a deterministic tensor order.
        all_tensors = [tens for nm, idx, tens in sorted((tens.name, idx, tens) for idx, tens in enumerate(tensor_set))]

        self.tensor_map = {tens: idx for idx, tens in enumerate(all_tensors)}
        self.buffer_map = self.assign_buffers_to_tensors(all_tensors)

        tensors_offset = self.write_offset_vector([self.serialise_tensor(tens) for tens in all_tensors])

        # Add the Scratch Tensor as input to the NPU subgraph to get it allocated by TensorFlow Lite Micro
        scratch_tensor_idx = [v for k, v in self.tensor_map.items() if k.name.endswith("scratch")]

        # Make sure the input_tensors haven't been modified
        assert all(inp in sg.original_inputs for inp in sg.input_tensors)
        inputs_offset = self.write_int_vector(
            [self.tensor_map[tens] for tens in sg.original_inputs] + scratch_tensor_idx
        )
        outputs_offset = self.write_int_vector([self.tensor_map[tens] for tens in sg.output_tensors])

        operators_offset = self.write_offset_vector([self.serialise_operator(op) for op in all_ops])

        SubGraph.SubGraphStart(builder)
        SubGraph.SubGraphAddTensors(builder, tensors_offset)
        SubGraph.SubGraphAddInputs(builder, inputs_offset)
        SubGraph.SubGraphAddOutputs(builder, outputs_offset)

        SubGraph.SubGraphAddOperators(builder, operators_offset)

        return SubGraph.SubGraphEnd(builder)

    def write_aligned_bytes(self, buf):
        """Write raw bytes as a 16-byte-aligned flatbuffer vector.

        Bypasses the per-element Prepend API for speed by copying the whole
        payload into the builder's byte array in one assignment.
        """
        builder = self.builder
        builder.nested = True
        data = bytes(buf)
        length_bytes = UOffsetTFlags.py_type(len(data))
        builder.Prep(16, length_bytes)  # Reserve aligned storage
        builder.head = UOffsetTFlags.py_type(builder.Head() - length_bytes)  # Update FlatBuffer internal pointer
        builder.Bytes[builder.Head() : builder.Head() + length_bytes] = data  # Assign bytes to aligned area
        return builder.EndVector(length_bytes)

    def serialise_buffer(self, buf):
        """Serialise one Buffer table; buf may be None for an empty buffer."""
        builder = self.builder
        data = None
        if buf is not None:
            data = self.write_aligned_bytes(buf)
        Buffer.BufferStart(builder)
        if data is not None:
            Buffer.BufferAddData(builder, data)
        return Buffer.BufferEnd(builder)

    def serialise_metadata(self, metadata):
        """Serialise one Metadata table from a (name, buffer index) pair."""
        builder = self.builder
        name = builder.CreateString(metadata[0])

        Metadata.MetadataStart(builder)
        Metadata.MetadataAddName(builder, name)
        Metadata.MetadataAddBuffer(builder, metadata[1])

        return Metadata.MetadataEnd(builder)

    def serialise_model(self):
        """Serialise the top-level Model table and return its offset.

        Appends an "OfflineMemoryAllocation" metadata buffer holding the
        pre-computed SRAM address of every tensor (-1 where the tensor is
        to be allocated online by TensorFlow Lite Micro).
        """
        builder = self.builder
        operator_code_offset = self.write_offset_vector(
            [self.serialise_operator_code(idx, code) for idx, code in enumerate(self.operator_codes)]
        )

        description = builder.CreateString("Vela Optimised")

        subgraph_offset = self.write_offset_vector([self.serialise_subgraph(sg) for sg in self.subgraphs_to_write])

        # Fill the metadata buffer
        version = np.int32(0)
        subgraph_idx = np.int32(len(self.subgraphs_to_write))  # Only 1 supported currently
        nbr_tensors = np.int32(len(self.tensor_map))

        # An offset of -1 indicates that the tensor will be allocated online by Tensorflow Lite Micro
        offsets = [np.int32(-1)] * nbr_tensors

        # Ensure that the order of the offsets match the order of the tensors
        for tens, idx in self.tensor_map.items():
            if tens.mem_area == MemArea.Sram:
                offsets[idx] = np.int32(tens.address)

        metadata_buffer = np.array([version, subgraph_idx, nbr_tensors] + offsets)
        self.buffers_to_write.append(metadata_buffer)

        buffers_offset = self.write_offset_vector([self.serialise_buffer(buf) for buf in self.buffers_to_write])

        # The metadata buffer is the last one appended above.
        metadata_list = [("OfflineMemoryAllocation", len(self.buffers_to_write) - 1)]
        metadata_offset = self.write_offset_vector([self.serialise_metadata(metadata) for metadata in metadata_list])

        Model.ModelStart(builder)
        Model.ModelAddVersion(builder, tflite_version)
        Model.ModelAddOperatorCodes(builder, operator_code_offset)
        Model.ModelAddSubgraphs(builder, subgraph_offset)
        Model.ModelAddDescription(builder, description)
        Model.ModelAddBuffers(builder, buffers_offset)
        Model.ModelAddMetadata(builder, metadata_offset)
        return Model.ModelEnd(builder)

    def serialise(self):
        """Serialise the whole graph and return the finished flatbuffer bytes."""
        model = self.serialise_model()

        self.builder.FinishWithFileIdentifier(model, tflite_file_identifier)

        return self.builder.Output()

    def write(self, filename):
        """Serialise the graph and write the resulting bytes to filename.

        Bug fix: the original referenced the non-existent attributes
        self.filename and self.serialised_buf, which always raised
        AttributeError; it now uses the filename argument and serialise().
        """
        with open(filename, "wb") as f:
            f.write(self.serialise())
421
422
def write_tflite(nng, filename):
    """Serialise the graph nng to a TensorFlow Lite flatbuffer and save it to filename."""
    serialised = TFLiteSerialiser(nng).serialise()

    with open(filename, "wb") as f:
        f.write(serialised)