#!/usr/bin/env python3

# Copyright (c) 2020, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback

from enum import IntEnum, Enum, unique

import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Usage = tosa.Usage.Usage()
Format = tosa.Format.Format()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()

class TosaQuantGen:
    '''QuantizedInfo random generator helper functions.  Specify with 'qgen': in the operator definition'''
    def __init__(self):
        pass

    @staticmethod
    def needsQinfo(op, dtype):
        if dtype == DType.AINT8 or dtype == DType.INT8:
            return True
        return False

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.UnaryQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.UnaryQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.ConvQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.ConvQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.MatMulQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.MatMulQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.PadQuantInfo(testGen.randInt())
        else:
            qinfo.PadQuantInfo(0)
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert(multiplier <= (1 << scaleBits))

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        #print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        assert(multiplier <= (1 << scaleBits))
        assert(shift >= 0 and shift <= 63)

        return multiplier, shift

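# Illustrative sketch (not part of the generator): how computeMultiplierAndShift
# decomposes a floating-point scale into a fixed-point multiplier/shift pair.
# The values below are worked by hand for scale32=True and serve only as an example.
#
#   multiplier, shift = TosaQuantGen.computeMultiplierAndShift(0.75, True)
#   # math.frexp(0.75) == (0.75, 0), so multiplier == round(0.75 * 2**31) == 1610612736
#   # and shift == 0 + 31 == 31, i.e. 0.75 ~= 1610612736 * 2**-31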

class TosaTensorGen():
    ''' Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.  The actual random data is generated separately for each test.'''
    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName['operands']
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list
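
    # Illustrative example (values chosen by hand, not computed by this file):
    # for an op declared with 'operands': (2, 0) and a randomly generated shape
    # of [3, 5, 7], tgBasic returns one copy of that shape per operand:
    #
    #   TosaTensorGen.tgBasic(testGen, op, rank=3)  ->  [[3, 5, 7], [3, 5, 7]]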

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName['operands']

        assert(rank == 4)

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op['operands']

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 4)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op['filter']

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 4)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op['filter']

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        return [ifm_shape, filter_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 4)
        assert(pl == 1 and const == 2)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is H, W, C, M
        filter_hw = op['filter']

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 2)
        assert(pl == 2 and const == 0)

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.makeShape(1)[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 2)
        assert(pl == 2 and const == 0)

        a_shape = testGen.makeShape(rank)
        b_oc = testGen.makeShape(1)[0]
        b_shape = np.asarray([a_shape[1], b_oc])

        return [a_shape, b_shape]

class TosaArgGen:
    '''Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters.  The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.'''
    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        '''A trivial argument generator for operators that don't take any
        non-tensor arguments'''
        return [('', [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        '''Build the axis argument for operators that take a single axis'''
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(('axis_{}'.format(a), [a]))
        return axes
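
    # Illustrative example: every argument generator returns (name_suffix, arg_list)
    # tuples.  For a rank-3 input shape, agAxis above would produce
    #
    #   [('axis_0', [0]), ('axis_1', [1]), ('axis_2', [2])]
    #
    # and the 'axis_N' suffix is appended to the generated test name.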

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert(len(ifm_shape) == 4)
        assert(len(filter_shape) == 4)

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    p = [(padding // (maxPadding * 4)) % maxPadding,
                         (padding // (maxPadding * 2)) % maxPadding,
                         (padding // (maxPadding * 1)) % maxPadding,
                         padding % maxPadding]
                    d = [dilation // maxDilation + 1,
                         dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(('st{}{}_pad{}{}{}{}_dilat{}{}'.format(s[0], s[1],
                                                                           p[0], p[1], p[2], p[3],
                                                                           d[0], d[1]),
                                     [s, p, d]))
        return arg_list
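
    # Hand-worked example of the index decoding above, assuming maxStride == 2
    # and testGen.args.max_conv_padding == 1 (so maxPadding == 2); these values
    # are an assumption used only for illustration:
    #
    #   stride index 3   ->  s == [3 // 2 + 1, 3 % 2 + 1] == [2, 2]
    #   padding index 13 ->  p == [(13 // 8) % 2, (13 // 4) % 2, (13 // 2) % 2, 13 % 2]
    #                           == [1, 1, 0, 1]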

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert(len(ifm_shape) == 4)
        assert(len(filter_shape) == 4)

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    p = [(out_padding // (maxPadding * 1)) % maxPadding,
                         out_padding % maxPadding]
                    d = [dilation // maxDilation + 1,
                         dilation % maxDilation + 1]

                    oh = (ifm_shape[1] - filter_shape[1] - (filter_shape[1] - 1) * (d[0] - 1) +
                          2 * p[0]) // s[0] + 1

                    ow = (ifm_shape[2] - filter_shape[2] - (filter_shape[2] - 1) * (d[1] - 1) +
                          2 * p[1]) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(('st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}'.format(s[0], s[1],
                                                                                        p[0], p[1],
                                                                                        d[0], d[1],
                                                                                        os[0], os[1], os[2], os[3]),
                                     [s, p, d, os]))

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of 0/1 padding on each side of each dimension
        # This process might need some revision for >1 padding, but use rank**2 as a bitmask
        # for now
        for v in range(rank ** 2):

            # Create a flat padding array of length rank * 2
            paddings = np.zeros((rank * 2), dtype=np.int32)

            # Fill in the 1's
            for r in (range(rank * 2)):
                if (v >> r) & 1:
                    paddings[r] = 1

            # Reshape back to a 2D array
            paddings = paddings.reshape((rank, 2))

            arg_list.append(('pad{0:b}'.format(v), [paddings]))

        return arg_list
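
    # Hand-worked example of the bitmask scheme above for a rank-2 input:
    # v == 5 (0b101) sets bits 0 and 2 of the flat [rank * 2] padding array,
    # giving [1, 0, 1, 0], which reshapes to [[1, 0], [1, 0]] and is named 'pad101'.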

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert(len(shape) == 4)

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2,
                         (kernel % maxKernel) + 2]
                    p = [(padding // (maxPadding * 4)) % maxPadding,
                         (padding // (maxPadding * 2)) % maxPadding,
                         (padding // (maxPadding * 1)) % maxPadding,
                         padding % maxPadding]

                    arg_list.append(('st{}{}_kern{}{}_pad{}{}{}{}'.format(s[0], s[1],
                                                                          k[0], k[1],
                                                                          p[0], p[1], p[2], p[3]),
                                     [k, s, p]))
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception('Unexpected input dtype: {}'.format(inDtype))

        for dtype in dtypeList:
            arg_list.append(('out{}'.format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.AINT8, DType.INT16, DType.INT32]:
            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition.  Must be scale32=False
                            continue

                        arg_list.append(('out{}_sc{}_dr{}_pc{}'.format(DTypeNames[dtype], int(scale32), int(double_round), int(per_channel)),
                                         [dtype, scale32, double_round, per_channel]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val))):
            if (val % i) == 0:
                factors.append(i)

        return factors
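
    # Illustrative example: getFactors only walks candidate divisors below
    # sqrt(val), so getFactors(36) returns [1, 2, 3, 4] (range(1, 6), with 5
    # rejected).  The larger co-factors are recovered later by agReshape via
    # its remaining-element bookkeeping.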

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 6)
            newShape = []
            if (len(factors) < newRank):
                continue

            remainingElements = totalElements
            shuffledFactors = testGen.rng.permutation(factors)
            for i in range(newRank):
                # pick rank-1 factors
                newShape.append(shuffledFactors[0])
                remainingElements = remainingElements // shuffledFactors[0]
                shuffledFactors = testGen.rng.permutation(TosaArgGen.getFactors(remainingElements))
            newShape.append(remainingElements)

            # Toss in a -1 sometimes
            minusOne = testGen.randInt(0, newRank * 4)
            if minusOne < newRank:
                newShape[minusOne] = -1

            arg_list.append(('perm{}_rank{}'.format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        perms = range(len(ifm_shape))
        for p in range(testGen.args.num_rand_permutations):
            perms = np.int32(testGen.rng.permutation(perms)).tolist()

            # Avoid duplicates
            found = False
            for name, other_perm in arg_list:
                if other_perm[0] == perms:
                    found = True
                    break

            if not found:
                arg_list.append(('perm{}'.format(p), [perms]))

        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(('perm{}'.format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                multiples.append(testGen.randInt(1, 4))

            arg_list.append(('perm{}'.format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    # (minimum of 1 so the stride division below can't hit zero)
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]

                    shift = testGen.randInt(1, 11)

                    stride = [(ifm_shape[1] << shift) // output_dims[0],
                              (ifm_shape[2] << shift) // output_dims[1]]

                    offset = [testGen.randInt(-stride[0], (ifm_shape[1] << shift) - (output_dims[0] - 1) * stride[0]),
                              testGen.randInt(-stride[1], (ifm_shape[2] << shift) - (output_dims[1] - 1) * stride[1])]

                    arg_list.append(('mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}'.format(m, shift, output_dims[0], output_dims[1],
                                                                                              testGen.typeStr(outputDType), stride[0], stride[1],
                                                                                              offset[0], offset[1]),
                                     [m, stride, offset, shift, output_dims, outputDType]))

        return arg_list
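
    # Hand-worked example of the fixed-point resize parameters above: with an
    # input height of 10, a randomly chosen output height of 20 and shift == 10,
    # stride[0] == (10 << 10) // 20 == 512, i.e. 0.5 input rows per output row
    # in Q10 fixed point.  The offsets are then drawn from a range intended to
    # keep every sample inside the (shifted) input extent.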

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(('cond{}'.format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(('iter{}'.format(iter), [iter]))

        return arg_list


class TosaTestGen:
    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(os.path.join(self.basePath, self.testPath, '{}.tosa'.format(testName)), 'wb') as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, 'desc.json'), 'w') as fd:
            fd.write(self.ser.writeJson('{}.tosa'.format(testName)))

    def getRandTensor(self, shape, dtype):
        RAND_SHIFT_FACTOR = 0.5
        RAND_SCALE_FACTOR = 4.0

        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        elif dtype == DType.AINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-127, high=128, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape))
        elif dtype == DType.INT48:
            return np.int64(self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape))
        elif dtype == DType.FLOAT:
            return np.float32(self.rng.random(size=shape) - RAND_SHIFT_FACTOR * RAND_SCALE_FACTOR)
        else:
            raise Exception('Unrecognized Dtype: {}'.format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype):
        placeholders = []

        for shape in shape_list:
            arr = self.getRandTensor(shape, dtype)
            placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype):
        consts = []

        for shape in shape_list:
            arr = self.getRandTensor(shape, dtype)
            consts.append(self.ser.addConst(shape, dtype, Usage.ACTIVATION, [], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(self.rng.integers(low=self.args.tensor_shape_range[0],
                                          high=self.args.tensor_shape_range[1],
                                          size=rank))

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.AINT8:
            low, high = (0, 256)
        elif dtype == DType.INT8:
            low, high = (-127, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception('Unknown dtype: {}'.format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return 'x'.join(sStr)

    def typeStr(self, t):
        if t == DType.BOOL:
            return 'b'
        elif t == DType.AINT8:
            return 'a8'
        elif t == DType.INT4:
            return 'i4'
        elif t == DType.INT8:
            return 'i8'
        elif t == DType.INT16:
            return 'i16'
        elif t == DType.INT32:
            return 'i32'
        elif t == DType.INT48:
            return 'i48'
        elif t == DType.FLOAT:
            return 'float'
        else:
            raise Exception('Unknown dtype, cannot convert to string: {}'.format(t))

    def typeWidth(self, t):
        ''' Get the datatype width for integer types'''
        if t == DType.AINT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception('Unknown dtype, cannot determine width: {}'.format(t))

    # Operator test build functions
    # Each returns the result tensor.  The argument generators above supply a list of
    # (stringDescriptor, [build_fcn_arg_list]) tuples; the string descriptor is used to
    # generate the test name and the build_fcn_arg_list is expanded and passed to the
    # operator test build function.

    def build_unary(self, op, a, qinfo=None):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_binary_broadcast(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_binary_nonbroadcast(self, op, a, b):
        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_mul(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_table(self, op, a):
        # Constant size, random values
        table_arr = self.getRandTensor([513], DType.INT16)
        table_tens = self.ser.addConst(table_arr.shape, DType.INT16, Usage.INDEX, [], table_arr)

        result_tens = OutputShaper.tableOp(self.ser, a, table_tens)
        self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)

        return result_tens

    def build_select(self, op, cond, a, b):

        # Replace the cond tensor with a boolean tensor since it probably
        # has the wrong dtype
        t = self.buildPlaceholderTensors([cond.shape], DType.BOOL)
        cond = t[0]

        result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
        self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])

        return result_tens

    def build_comparison(self, op, a, b):
        result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_argmax(self, op, a, axis):
        result_tens = OutputShaper.argmaxOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
        result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)

        attr = ts.TosaSerializerAttribute()
        attr.Pool2dAttribute(kernel, stride, pad)
        input.addFormat(Format.NHWC)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert(len(padding) == 4)
        result_tens = OutputShaper.conv2dOp(self.ser, ifm, filter, strides, padding, dilations)

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        ifm.addFormat(Format.NHWC)
        # Update the filter ordering
        filter.addUsage(Usage.WEIGHT)
        filter.addFormat(Format.OHWI)

        self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_transpose_conv2d(self, op, ifm, filter, stride, outpad, dilation, output_shape, qinfo):
        assert(len(outpad) == 2)
        result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)

        attr = ts.TosaSerializerAttribute()
        attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)

        ifm.addFormat(Format.NHWC)
        # Update the filter ordering
        filter.addUsage(Usage.WEIGHT)
        filter.addFormat(Format.OHWI)

        # Create bias here since the acc_t depends on (but isn't the same as) the input dtype
        # The bias is OC
        if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
            bias_type = DType.INT32
        elif ifm.dtype == DType.INT16:
            bias_type = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            bias_type = DType.FLOAT
        else:
            raise Exception('Unsupported dtype for transpose_conv2d: {}'.format(ifm.dtype))

        bias_arr = self.getRandTensor([filter.shape[0]], bias_type)
        bias_tens = self.ser.addConst([filter.shape[0]], bias_type, [], [], bias_arr)

        self.ser.addOperator(op, [ifm.name, filter.name, bias_tens.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_depthwise_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        result_tens = OutputShaper.depthwiseConv2dOp(self.ser, ifm, filter, strides, padding, dilations)

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        ifm.addFormat(Format.NHWC)
        filter.addUsage(Usage.WEIGHT)
        filter.addFormat(Format.HWIM)

        self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_fully_connected(self, op, ifm, filter, bias, qinfo):
        result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)

        filter.addUsage(Usage.WEIGHT)
        self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_matmul(self, op, a, b, qinfo):
        result_tens = OutputShaper.matmulOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reduce(self, op, a, axis):
        result_tens = OutputShaper.reduceOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_clamp(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        # Get two random ints
        v = [self.randInt(), self.randInt()]

        if a.dtype == DType.FLOAT:
            attr.ClampAttribute(0, 0, min(v), max(v))
        else:
            attr.ClampAttribute(min(v), max(v), 0, 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_leaky_relu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        attr = ts.TosaSerializerAttribute()

        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    # Needs an additional type/input
    def build_prelu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_relun(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        if a.dtype == DType.FLOAT:
            attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
        else:
            attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_sigmoid(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_tanh(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_concat(self, op, a, b, axis):
        result_tens = OutputShaper.concatOp(self.ser, a, b, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_pad(self, op, a, padding, qinfo):
        result_tens = OutputShaper.padOp(self.ser, a, padding)

        # Need to turn the padding array into a TOSA tensor here.
        # This is one of the few tensor operands that does not get
        # randomly generated
        padding_tens = self.ser.addConst(padding.shape, DType.INT32, [], [], padding)

        self.ser.addOperator(op, [a.name, padding_tens.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reshape(self, op, a, newShape):
        result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)

        attr = ts.TosaSerializerAttribute()
        attr.ReshapeAttribute(newShape)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_reverse(self, op, a, axis):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_transpose(self, op, a, perms):
        result_tens = OutputShaper.transposeOp(self.ser, a, perms)

        perms_tens = self.ser.addConst([len(perms)], DType.INT32, Usage.ACTIVATION, [], np.int32(perms))

        self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
        return result_tens

    def build_slice(self, op, a, begin, size):
        result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)

        attr = ts.TosaSerializerAttribute()
        attr.SliceAttribute(begin, size)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_tile(self, op, a, multiples):
        result_tens = OutputShaper.tileOp(self.ser, a, multiples)

        attr = ts.TosaSerializerAttribute()
        attr.TileAttribute(multiples)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens


    def build_gather(self, op, values, axis):

        # Create a new indices tensor here with data that doesn't exceed
        # the dimensions of the values tensor
        max_val = values.shape[axis]
        indicies_arr = np.int32(self.rng.integers(low=0, high=max_val, size=[self.randInt(1, max_val + 1)]))
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, Usage.INDEX, [], indicies_arr)

        result_tens = OutputShaper.gatherOp(self.ser, values, indicies, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name], attr)

        return result_tens

    def build_resize(self, op, input, mode, stride, offset, shift, output_dims, output_dtype):
        result_tens = OutputShaper.resizeOp(self.ser, input, mode, stride, offset, shift, output_dims, output_dtype)

        attr = ts.TosaSerializerAttribute()
        attr.ResizeAttribute(output_dims, stride, offset, shift, mode)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens

    def build_identityn(self, op, val, val2):

        result_tens = OutputShaper.unaryOp(self.ser, val)
        result_tens2 = OutputShaper.unaryOp(self.ser, val2)
        self.ser.addOperator(op, [val.name, val2.name], [result_tens.name, result_tens2.name])
        return result_tens

    def build_placeholder(self, op, val):
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)

    # Type Conversion
    def build_cast(self, op, val, out_dtype):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
        self.ser.addOperator(op, [val.name], [result_tens.name])
        return result_tens

    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        if val.dtype == DType.AINT8:
            input_zp = self.randInt()
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.AINT8:
            output_zp = self.randInt()
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

        # Calculate scale based on:
        # scale = a * (2^output_width) / (2^input_width)

        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
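
        # Worked example (assumes an AINT8 input and INT16 output; values by hand):
        # in_type_width becomes 8 + 1 == 9 once the zero point is accounted for,
        # out_type_width is 16, so scale_arr == a * 2**16 / 2**9 == a * 128.0
        # for each channel's random a in [0, 1).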

        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

        #print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(scale_arr[i], scale32)

        #print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(input_zp,
                              output_zp,
                              multiplier_arr,
                              shift_arr,
                              scale32,
                              double_round,
                              per_channel)

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens

    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition.  Build Then/Else blocks
        # and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 255, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32, Usage.ACTIVATION, [])

        # Create the attribute with the names of the then/else blocks
        then_block = 'THEN_BLOCK'
        else_block = 'ELSE_BLOCK'
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens

    def build_cond_if_binary(self, op, a, b, cond):
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype, Usage.ACTIVATION, [])
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = 'THEN_BLOCK'
        else_block = 'ELSE_BLOCK'
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name, a.name, b.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens

    def build_while_loop(self, op, a, iter_val):
        iter = self.ser.addPlaceholder([], DType.INT32, Usage.ACTIVATION, [], [np.int32(iter_val)])

        cond_block = 'COND_BLOCK'
        body_block = 'BODY_BLOCK'

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        #acc = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, a.usage, a.dformat, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
        a_out = self.ser.addIntermediate(a.shape, a.dtype, a.usage, a.dformat)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)

        # While_loop operator
        self.ser.addOperator(op,
                             [iter.name, a.name, acc.name],
                             [iter_out.name, a_out.name, acc_out.name], attr)

        # COND block (input: iter, output: cond_tens)
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL, [], [])
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name],
                             [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out
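
    # Note on the generated while_loop (illustration only, not extra test logic):
    # the body adds 'a' into the accumulator and decrements 'iter', and the
    # condition tests iter > 0, so for an iteration count of N the reference
    # output in acc_out should be N * a (and all zeros for N == 0).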


    def genOpTestList(self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None):

        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception('Cannot find op with name {}'.format(opName))

        # Initialize a new random number generator
        self.rng = np.random.default_rng(self.random_seed)

        build_fcn, tgen_fcn, agen_fcn = op['build_fcn']

        # Generate the lists of arguments
        rmin, rmax = op['rank']

        # Test list consists of tuples of:
        # (opName, testNameStr, dtype, shapeList, argumentsList)
        testList = []

        if not shapeFilter:
            shapeFilter = [None]

        for r in range(rmin, rmax + 1):

            # Filter out the rank?
            if rankFilter is not None and r not in rankFilter:
                continue

            for t in op['types']:

                # Filter tests based on dtype?
                if dtypeFilter is not None:
                    if t not in dtypeFilter:
                        continue

                # Create the placeholder and const tensors
                for shape in shapeFilter:
                    # A None shape chooses a random shape of a given rank

                    # Filter out by rank
                    if shape is not None and len(shape) != r:
                        continue

                    self.setTargetShape(shape)
                    shapeList = tgen_fcn(self, op, r)

                    shapeStr = self.shapeStr(shapeList[0])
                    typeStr = self.typeStr(t)

                    # Argument lists consist of tuples of the (str, []) string representation and the build function argument list
                    argList = []
                    if agen_fcn:
                        argList = agen_fcn(self, opName, shapeList, t)
                    else:
                        argList = [('', [])]

                    for argStr, args in argList:
                        if argStr:
                            testStr = '{}_{}_{}_{}'.format(opName, shapeStr, typeStr, argStr)
                        else:
                            testStr = '{}_{}_{}'.format(opName, shapeStr, typeStr)

                        testList.append((opName, testStr, t, shapeList, args))

        return testList
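
    # Illustrative example of one generated test tuple (shape and dtype are
    # hypothetical): ('add', 'add_3x5x7_i32', DType.INT32, shapeList, [])
    # where shapeList holds the two broadcast-fuzzed input shapes and the empty
    # argument list comes from an op with no argument generator.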

    def serializeTest(self, opName, testStr, dtype, shapeList, testArgs):
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception('Cannot find op with name {}'.format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op['build_fcn']
        pCount, cCount = op['operands']

        try:
            qgen = op['qgen']
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []
        tens.extend(self.buildPlaceholderTensors(shapeList[0:pCount], dtype))
        tens.extend(self.buildConstTensors(shapeList[pCount:], dtype))

        if qgen is not None:
            qinfo = qgen(self, op, dtype)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op['op'], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op['op'], *tens, *testArgs)
        except TypeError as e:
            print('build_fcn: {}\nTensors: {}\nArgs: {}\n'.format(build_fcn, tens, testArgs))
            raise e

        # Save the serialized test
        self.serialize('test')

    def createDynamicOpLists(self):

        # Dynamically create op lists for convolutions with a list of kernel sizes
        KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]

        for k in KERNELS:
            testName = 'conv2d_{}x{}'.format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['conv2d_TEMPLATE'].copy()
            self.TOSA_OP_LIST[testName]['filter'] = k
            self.TOSA_OP_LIST[testName]['template'] = False

            testName = 'depthwise_conv2d_{}x{}'.format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['depthwise_conv2d_TEMPLATE'].copy()
            self.TOSA_OP_LIST[testName]['filter'] = k
            self.TOSA_OP_LIST[testName]['template'] = False

            testName = 'transpose_conv2d_{}x{}'.format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['transpose_conv2d_TEMPLATE'].copy()
            self.TOSA_OP_LIST[testName]['filter'] = k
            self.TOSA_OP_LIST[testName]['template'] = False

        # Delete any templates after having created any dynamic ops
        # This is a two-pass operation because it's bad practice to delete
        # keys from dictionaries while iterating
        keyList = []
        for k in self.TOSA_OP_LIST:
            try:
                if self.TOSA_OP_LIST[k]['template'] == True:
                    keyList.append(k)
                    continue
            except KeyError:
                pass

        for k in keyList:
            del self.TOSA_OP_LIST[k]

    def initOpListDefaults(self):
        '''Fill in default fields for ops if they aren't already specified.
        Look for missing required fields (datastructure linting).'''
        for op in self.TOSA_OP_LIST:

            # Required fields
            try:
                pl, c = self.TOSA_OP_LIST[op]['operands']
            except (KeyError, ValueError, TypeError):
                raise Exception('Op {} is missing a valid operand tuple in TOSA_OP_LIST'.format(op))

            try:
                fcn, tgen, arggen = self.TOSA_OP_LIST[op]['build_fcn']
            except (KeyError, ValueError, TypeError):
                raise Exception('Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST'.format(op))

            try:
                types = self.TOSA_OP_LIST[op]['types']
            except KeyError as e:
                raise Exception('Op {} is missing a valid type list in TOSA_OP_LIST'.format(op))

            try:
                opcode = self.TOSA_OP_LIST[op]['op']
            except KeyError as e:
                raise Exception('Op {} is missing the Op field in TOSA_OP_LIST'.format(op))

            # Put in default rank range, if missing
            try:
                rank = self.TOSA_OP_LIST[op]['rank']
            except KeyError:
                self.TOSA_OP_LIST[op]['rank'] = self.DEFAULT_RANK_RANGE

    # Tensor operator list
    #  'op': op name
    #  'operands': tuple of (placeholder, const) operands
    #  'rank': optional, restricts rank to tuple inclusive of (min, max); if not specified, defaults to (1, 4)
    #  'build_fcn': tuple of (operator build function, TensorGen function, ArgGen function)
    #  'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]

    # Types with an aint8
    TYPE_INT = [DType.AINT8, DType.INT16, DType.INT32]                     # Most operators support AINT8 instead of INT8, excludes INT4
    TYPE_INT_FP = [DType.AINT8, DType.INT16, DType.INT32, DType.FLOAT]     # Most operators support AINT8 instead of INT8, excludes INT4

    # Types with an int8
    TYPE_PURE_INT = [DType.INT8, DType.INT16, DType.INT32]                 # Note: excludes INT4
    TYPE_PURE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT] # Note: excludes INT4
    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.AINT8, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.AINT8, DType.INT16, DType.FLOAT]

    DEFAULT_RANK_RANGE = (1, 4)

1528 TOSA_OP_LIST = {
1529 # Binary ops
1530 'add':
1531 { 'op': Op.ADD,
1532 'operands': (2, 0),
1533 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1534 'types': TYPE_FI32 },
1535
1536 'arithmetic_right_shift':
1537 { 'op': Op.ARITHMETIC_RIGHT_SHIFT,
1538 'operands': (2, 0),
1539 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1540 'types': TYPE_PURE_INT },
1541
1542 'bitwise_and':
1543 { 'op': Op.BITWISE_AND,
1544 'operands': (2, 0),
1545 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1546 'types': TYPE_INT },
1547
1548 'bitwise_or':
1549 { 'op': Op.BITWISE_OR,
1550 'operands': (2, 0),
1551 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1552 'types': TYPE_INT },
1553
1554 'bitwise_xor':
1555 { 'op': Op.BITWISE_XOR,
1556 'operands': (2, 0),
1557 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1558 'types': TYPE_INT },
1559
1560 'logical_and':
1561 { 'op': Op.LOGICAL_AND,
1562 'operands': (2, 0),
1563 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1564 'types': TYPE_BOOL },
1565
1566 'logical_left_shift':
1567 { 'op': Op.LOGICAL_LEFT_SHIFT,
1568 'operands': (2, 0),
1569 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1570 'types': TYPE_PURE_INT },
1571
1572 'logical_right_shift':
1573 { 'op': Op.LOGICAL_RIGHT_SHIFT,
1574 'operands': (2, 0),
1575 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1576 'types': TYPE_PURE_INT },
1577
1578 'logical_or':
1579 { 'op': Op.LOGICAL_OR,
1580 'operands': (2, 0),
1581 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1582 'types': TYPE_BOOL },
1583
1584 'logical_xor':
1585 { 'op': Op.LOGICAL_XOR,
1586 'operands': (2, 0),
1587 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1588 'types': TYPE_BOOL },
1589
1590 'max':
1591 { 'op': Op.MAXIMUM,
1592 'operands': (2, 0),
1593 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1594 'types': TYPE_FI32 },
1595
1596 'min':
1597 { 'op': Op.MINIMUM,
1598 'operands': (2, 0),
1599 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1600 'types': TYPE_FI32 },
1601
1602 'mul':
1603 { 'op': Op.MUL,
1604 'operands': (2, 0),
1605 'build_fcn': (build_mul, TosaTensorGen.tgBroadcastFuzz, None),
1606 'types': TYPE_PURE_INT_FP },
1607
1608 'pow':
1609 { 'op': Op.POW,
1610 'operands': (2, 0),
1611 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBasic, None),
1612 'types': TYPE_FP },
1613
1614 'sub':
1615 { 'op': Op.SUB,
1616 'operands': (2, 0),
1617 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1618 'types': TYPE_FI32 },
1619
1620 'table':
1621 { 'op': Op.TABLE,
1622 # Use the automatic generation functions to create the input array
1623 # but create the table tensor in the build function, as it may be
1624 # a different type from the input
1625 'operands': (1, 0),
1626 'build_fcn': (build_table, TosaTensorGen.tgBasic, None),
1627 'types': [ DType.INT16 ] },
1628
1629 'argmax':
1630 { 'op': Op.ARGMAX,
1631 'operands': (1, 0),
1632 'build_fcn': (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1633 'types': TYPE_FP },
1634
1635 # Templated operator. Filled in by createDynamicOpLists
1636 'conv2d_TEMPLATE':
1637 { 'op': Op.CONV2D,
1638 'operands': (1, 2),
1639 'rank': (4, 4),
1640 'build_fcn': (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
1641 'qgen': TosaQuantGen.qgConv,
1642 'types': TYPE_FP,
1643 'template': True },
1644
1645 # Templated operator. Filled in by createDynamicOpLists
1646 'depthwise_conv2d_TEMPLATE':
1647 { 'op': Op.DEPTHWISE_CONV2D,
1648 'operands': (1, 2),
1649 'filter': [1, 1],
1650 'rank': (4, 4),
1651 'build_fcn': (build_depthwise_conv2d, TosaTensorGen.tgDepthwiseConv2D, TosaArgGen.agConv2D),
1652 'qgen': TosaQuantGen.qgConv,
1653 'types': TYPE_FP,
1654 'template': True },
1655
1656 # Templated operator. Filled in by createDynamicOpLists
1657 'transpose_conv2d_TEMPLATE':
1658 { 'op': Op.TRANSPOSE_CONV2D,
1659 'operands': (1, 1),
1660 'rank': (4, 4),
1661 'build_fcn': (build_transpose_conv2d, TosaTensorGen.tgTransposeConv2D, TosaArgGen.agTransposeConv2D),
1662 'qgen': TosaQuantGen.qgConv,
1663 'types': TYPE_FP,
1664 'template': True },
1665
1666 'fully_connected':
1667 { 'op': Op.FULLY_CONNECTED,
1668 'operands': (2, 0),
1669 'rank': (2, 2),
1670 'build_fcn': (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
1671 'qgen': TosaQuantGen.qgConv,
1672 'types': TYPE_FP },
1673
1674 'matmul':
1675 { 'op': Op.MATMUL,
1676 'operands': (2, 0),
1677 'rank': (2, 2),
1678 'build_fcn': (build_matmul, TosaTensorGen.tgMatmul, None),
1679 'qgen': TosaQuantGen.qgMatmul,
1680 'types': TYPE_NARROW_INT_FP },
1681
1682 # Unary operators
1683 'abs':
1684 { 'op': Op.ABS,
1685 'operands': (1, 0),
1686 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1687 'types': TYPE_FI32 },
1688
1689 'bitwise_not':
1690 { 'op': Op.BITWISE_NOT,
1691 'operands': (1, 0),
1692 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1693 'types': TYPE_INT },
1694
1695 'ceil':
1696 { 'op': Op.CEIL,
1697 'operands': (1, 0),
1698 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1699 'types': TYPE_FP },
1700
1701 'clz':
1702 { 'op': Op.CLZ,
1703 'operands': (1, 0),
1704 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1705 'types': [ DType.INT32 ] },
1706
1707 'exp':
1708 { 'op': Op.EXP,
1709 'operands': (1, 0),
1710 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1711 'types': TYPE_FP },
1712
1713 'floor':
1714 { 'op': Op.FLOOR,
1715 'operands': (1, 0),
1716 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1717 'types': TYPE_FP },
1718
1719 'log':
1720 { 'op': Op.LOG,
1721 'operands': (1, 0),
1722 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1723 'types': TYPE_FP },
1724
1725 'floor':
1726 { 'op': Op.FLOOR,
1727 'operands': (1, 0),
1728 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1729 'types': TYPE_FP },
1730
1731 'logical_not':
1732 { 'op': Op.LOGICAL_NOT,
1733 'operands': (1, 0),
1734 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1735 'types': TYPE_BOOL },
1736
1737 'negate':
1738 { 'op': Op.NEGATE,
1739 'operands': (1, 0),
1740 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1741 'qgen': TosaQuantGen.qgUnary,
1742 'types': TYPE_INT_FP },
1743
1744 'reciprocal':
1745 { 'op': Op.RECIPROCAL,
1746 'operands': (1, 0),
1747 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1748 'types': TYPE_FP },
1749
1750 'rsqrt':
1751 { 'op': Op.RSQRT,
1752 'operands': (1, 0),
1753 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1754 'types': TYPE_FP },
1755
1756 # Ternary operators
1757 'select':
1758 { 'op': Op.SELECT,
1759 'operands': (3, 0),
1760 'build_fcn': (build_select, TosaTensorGen.tgBroadcastFuzz, None),
1761 'types': TYPE_FIB },
1762
1763 # Comparison operators
1764 'equal':
1765 { 'op': Op.EQUAL,
1766 'operands': (2, 0),
1767 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
1768 'types': TYPE_FI32 },
1769
1770 'greater_equal':
1771 { 'op': Op.GREATER_EQUAL,
1772 'operands': (2, 0),
1773 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
1774 'types': TYPE_FI32 },
1775
1776 'greater':
1777 { 'op': Op.GREATER,
1778 'operands': (2, 0),
1779 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
1780 'types': TYPE_FI32 },
1781
1782 # Pooling operators
1783 'avg_pool2d':
1784 { 'op': Op.AVG_POOL2D,
1785 'operands': (1, 0),
1786 'rank': (4, 4),
1787 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
1788 'qgen': TosaQuantGen.qgUnary,
1789 'types': TYPE_NARROW_INT_FP },
1790
1791
1792 'max_pool2d':
1793 { 'op': Op.MAX_POOL2D,
1794 'operands': (1, 0),
1795 'rank': (4, 4),
1796 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
1797 'types': TYPE_NARROW_INT_FP },
1798
1799 # Reduce operators
1800 'reduce_any':
1801 { 'op': Op.REDUCE_ANY,
1802 'operands': (1, 0),
1803 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1804 'types': TYPE_BOOL },
1805
1806 'reduce_all':
1807 { 'op': Op.REDUCE_ALL,
1808 'operands': (1, 0),
1809 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1810 'types': TYPE_BOOL },
1811
1812 'reduce_max':
1813 { 'op': Op.REDUCE_MAX,
1814 'operands': (1, 0),
1815 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1816 'types': TYPE_INT_FP },
1817
1818 'reduce_min':
1819          { 'op': Op.REDUCE_MIN,
1820 'operands': (1, 0),
1821 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1822 'types': TYPE_INT_FP },
1823
1824 'reduce_product':
1825 { 'op': Op.REDUCE_PRODUCT,
1826 'operands': (1, 0),
1827 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1828 'types': TYPE_FP },
1829
1830 'reduce_sum':
1831 { 'op': Op.REDUCE_SUM,
1832 'operands': (1, 0),
1833 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1834 'types': TYPE_FI32 },
1835
1836 # Activation functions
1837 'clamp':
1838 { 'op': Op.CLAMP,
1839 'operands': (1, 0),
1840 'build_fcn': (build_clamp, TosaTensorGen.tgBasic, None),
1841 'types': TYPE_NARROW_INT_FP },
1842
1843 'relun':
1844 { 'op': Op.RELUN,
1845 'operands': (1, 0),
1846 'build_fcn': (build_relun, TosaTensorGen.tgBasic, None),
1847 'types': TYPE_FI32 },
1848
1849 'sigmoid':
1850 { 'op': Op.SIGMOID,
1851 'operands': (1, 0),
1852 'build_fcn': (build_sigmoid, TosaTensorGen.tgBasic, None),
1853 'types': TYPE_FP },
1854
1855 'tanh':
1856 { 'op': Op.TANH,
1857 'operands': (1, 0),
1858 'build_fcn': (build_tanh, TosaTensorGen.tgBasic, None),
1859 'types': TYPE_FP },
1860
1861 # Data layout operators
1862 'concat':
1863 { 'op': Op.CONCAT,
1864 'operands': (2, 0),
1865 'build_fcn': (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1866 'types': TYPE_FIB },
1867
1868 'pad':
1869 { 'op': Op.PAD,
1870 'operands': (1, 0),
1871 'build_fcn': (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
1872 'qgen': TosaQuantGen.qgPad,
1873 'types': TYPE_FIB },
1874
1875 'reshape':
1876 { 'op': Op.RESHAPE,
1877 'operands': (1, 0),
1878 'build_fcn': (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
1879 'types': TYPE_FIB },
1880
1881 'reverse':
1882 { 'op': Op.REVERSE,
1883 'operands': (1, 0),
1884 'build_fcn': (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1885 'types': TYPE_FIB },
1886
1887 'slice':
1888 { 'op': Op.SLICE,
1889 'operands': (1, 0),
1890 'build_fcn': (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
1891 'types': TYPE_FIB },
1892
1893 'tile':
1894 { 'op': Op.TILE,
1895 'operands': (1, 0),
1896 'build_fcn': (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
1897 'types': TYPE_FIB },
1898
1899 'transpose':
1900 { 'op': Op.TRANSPOSE,
1901 'operands': (1, 0),
1902            'rank': (2, 4), # Do not allow transpose on rank=1
1903 'build_fcn': (build_transpose, TosaTensorGen.tgBasic, TosaArgGen.agTranspose),
1904 'types': TYPE_FIB },
1905
1906 # Scatter/Gather
1907 'gather':
1908 { 'op': Op.GATHER,
1909 'operands': (1, 0),
1910 'build_fcn': (build_gather, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1911 'types': TYPE_INT },
1912
1913
1914 # Image operations
1915 'resize':
1916 { 'op': Op.RESIZE,
1917 'operands': (1, 0),
1918 'rank': (4, 4),
1919 'build_fcn': ( build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
1920 'types': [ DType.INT8, DType.INT16 ] },
1921
1922
1923 # Data nodes
1924 'placeholder':
1925 { 'op': Op.PLACEHOLDER,
1926 'operands': (1, 0),
1927 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None),
1928 'types': TYPE_FIB },
1929
1930 'const':
1931 { 'op': Op.CONST,
1932 'operands': (1, 0),
1933 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None),
1934 'types': TYPE_FIB },
1935
1936
1937 'identity':
1938 { 'op': Op.IDENTITY,
1939 'operands': (1, 0),
1940 'build_fcn': ( build_unary, TosaTensorGen.tgBasic, None),
1941 'types': TYPE_FIB },
1942
1943
1944 'identityn':
1945 { 'op': Op.IDENTITYN,
1946 'operands': (2, 0),
1947 'build_fcn': ( build_identityn, TosaTensorGen.tgBasic, None),
1948 'types': TYPE_FIB },
1949
1950 # Type conversion
1951 'cast':
1952 { 'op': Op.CAST,
1953 'operands': (1, 0),
1954 'build_fcn': ( build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast ),
1955 'types': [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ] },
1956
1957 'rescale':
1958 { 'op': Op.RESCALE,
1959 'operands': (1, 0),
1960 'build_fcn': ( build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale ),
1961 'types': [ DType.AINT8, DType.INT16, DType.INT32, DType.INT48 ] },
1962
1963 # Custom
1964 # Not implemented.
1965
1966 # Control flow
1967
1968    # Two variants of cond_if, one that generates one of two constant tensors (no
1969 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
1970 # (two inputs to the basic blocks, one output)
1971 'cond_if_const':
1972 { 'op': Op.COND_IF,
1973 'operands': (0, 2),
1974 'build_fcn': ( build_cond_if_const, TosaTensorGen.tgBasic, TosaArgGen.agCondIf ),
1975 'types': [ DType.BOOL ] },
1976
1977 'cond_if_binary':
1978 { 'op': Op.COND_IF,
1979 'operands': (2, 0),
1980 'build_fcn': ( build_cond_if_binary, TosaTensorGen.tgBasic, TosaArgGen.agCondIf ),
1981 'types': TYPE_FI32 },
1982
1983 # while_loop
1984 'while_loop':
1985 { 'op': Op.WHILE_LOOP,
1986 'operands': (0, 1),
1987 'build_fcn': ( build_while_loop, TosaTensorGen.tgBasic, TosaArgGen.agWhileLoop ),
1988 'types': [DType.INT32] },
1989
1990
1991 }
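    # Each entry in the operator table above follows the same schema:
    #   'op'        - serializer opcode
    #   'operands'  - (number of placeholder inputs, number of constant inputs)
    #   'rank'      - optional (min, max) allowed input tensor rank
    #   'build_fcn' - (test builder, tensor shape generator, argument generator)
    #   'qgen'      - optional quantization info generator
    #   'types'     - data types to generate tests for
    #   'template'  - optional; templated entries are expanded by createDynamicOpLists
    #   'filter'    - optional base filter size used when expanding templated convolutions
    #
    # A minimal, illustrative sketch of how a driver might unpack one entry
    # (the variable names below are assumptions, not the actual driver code):
    #   build_fcn, tensor_gen, arg_gen = entry['build_fcn']
    #   num_placeholders, num_consts = entry['operands']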
1992
1993class OutputShaper:
1994 # Methods in this class compute the expected output shape and datatype
1995 # for common classes of operations
1996 def __init__(self):
1997 pass
1998
1999 # These methods return arguments that can be used for
2000 # creating a new output tensor
2001 @staticmethod
2002 def binaryBroadcastOp(ser, a, b):
2003 assert(len(a.shape) == len(b.shape))
2004 assert(a.dtype == b.dtype)
2005
2006 shape = []
2007 for i in range(len(a.shape)):
2008 if a.shape[i] == 1:
2009 shape.append(b.shape[i])
2010 else:
2011 shape.append(a.shape[i])
2012
2013 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
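        # Illustrative example (hypothetical shapes): a.shape [3, 1, 5] broadcast
        # against b.shape [3, 4, 5] produces an output shape of [3, 4, 5]; any
        # dimension where a is 1 takes the corresponding size from b.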
2014
2015 @staticmethod
2016 def binaryNonBroadcastOp(ser, a, b):
2017 assert(len(a.shape) == len(b.shape))
2018 assert(a.dtype == b.dtype)
2019
2020 shape = []
2021 for i in range(len(a.shape)):
2022 assert(a.shape[i] == b.shape[i])
2023 shape.append(a.shape[i])
2024
2025 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2026
2027 @staticmethod
2028 def unaryOp(ser, a):
2029 return ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
2030
2031 @staticmethod
2032 def selectOp(ser, cond, a, b):
2033 assert(len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape))
2034 assert(a.dtype == b.dtype)
2035
2036 shape = []
2037 for i in range(len(a.shape)):
2038 shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
2039
2040 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2041
2042 @staticmethod
2043 def binaryComparisonOp(ser, a, b):
2044 assert(len(a.shape) == len(b.shape))
2045 assert(a.dtype == b.dtype)
2046
2047 # Do broadcast
2048 shape = []
2049 for i in range(len(a.shape)):
2050 if a.shape[i] == 1:
2051 shape.append(b.shape[i])
2052 else:
2053 shape.append(a.shape[i])
2054
2055 # Force the output type to bool
2056 return ser.addOutput(shape, DType.BOOL, a.usage, a.dformat)
2057
2058 @staticmethod
2059 def reduceOp(ser, a, axis):
2060
2061 shape = a.shape.copy()
2062
2063 shape[axis] = 1
2064
2065 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2066
2067 @staticmethod
2068 def argmaxOp(ser, a, axis):
2069 shape = a.shape.copy()
2070 del shape[axis]
2071 return ser.addOutput(shape, DType.INT32, a.usage, a.dformat)
2072
2073 @staticmethod
2074 def conv2dOp(ser, ifm, filter, strides, padding, dilations):
2075
2076 # IFM: NHWC
2077 # Filter: OHWI
2078 # OFM: NHWC
2079
2080 if len(padding) == 2:
2081 # Expand padding to 4 parameters in the case of transpose_conv2d
2082 # From H,W to T,B,L,R
2083 padding = [padding[0], padding[0], padding[1], padding[1]]
2084
2085 h = (ifm.shape[1] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[0] - 1) + \
2086 padding[0] + padding[1]) // strides[0] + 1
2087
2088 w = (ifm.shape[2] - filter.shape[2] - (filter.shape[2] - 1) * (dilations[1] - 1) + \
2089 padding[2] + padding[3]) // strides[1] + 1
2090
2091 if h <= 0 or w <= 0:
2092            # Invalid test parameters; mark the test as an expected failure
2093 h = 0
2094 w = 0
2095 ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')
2096
2097 ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]
2098
2099 if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
2100 out_dtype = DType.INT32
2101 elif ifm.dtype == DType.INT16:
2102 out_dtype = DType.INT48
2103 elif ifm.dtype == DType.FLOAT:
2104 out_dtype = DType.FLOAT
2105 else:
2106 raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
2107
2108 return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)
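        # Worked example with hypothetical parameters (illustration only):
        #   ifm NHWC [1, 16, 16, 8], filter OHWI [8, 3, 3, 8],
        #   strides [1, 1], padding [0, 0, 0, 0], dilations [1, 1]
        #   h = (16 - 3 - (3 - 1) * (1 - 1) + 0 + 0) // 1 + 1 = 14
        #   w = (16 - 3 - (3 - 1) * (1 - 1) + 0 + 0) // 1 + 1 = 14
        #   ofm_shape = [1, 14, 14, 8]; a FLOAT ifm gives a FLOAT output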
2109
2110 @staticmethod
2111 def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
2112 # IFM: NHWC
2113 # Filter: HWCM
2114 # OFM: NHW C*M
2115 h = (ifm.shape[1] - filter.shape[0] - (filter.shape[0] - 1) * (dilations[0] - 1) + \
2116 padding[0] + padding[1]) // strides[0] + 1
2117
2118 w = (ifm.shape[2] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[1] - 1) + \
2119 padding[2] + padding[3]) // strides[1] + 1
2120
2121 if h <= 0 or w <= 0:
2122            # Invalid test parameters; mark the test as an expected failure
2123 h = 0
2124 w = 0
2125            ser.setExpectedFailure(True, 'Invalid combination of depthwise_conv2d parameters')
2126
2127 ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]
2128
2129 if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
2130 out_dtype = DType.INT32
2131 elif ifm.dtype == DType.INT16:
2132 out_dtype = DType.INT48
2133 elif ifm.dtype == DType.FLOAT:
2134 out_dtype = DType.FLOAT
2135 else:
2136 raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
2137
2138 return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)
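        # Worked example with hypothetical parameters (illustration only):
        #   ifm NHWC [1, 16, 16, 8], filter HWCM [3, 3, 8, 2],
        #   strides [1, 1], padding [0, 0, 0, 0], dilations [1, 1]
        #   h = (16 - 3 - (3 - 1) * (1 - 1) + 0 + 0) // 1 + 1 = 14
        #   w = 14, output channels = C * M = 8 * 2 = 16
        #   ofm_shape = [1, 14, 14, 16]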
2139
2140
2141 @staticmethod
2142 def pool2dOp(ser, ifm, kernel, stride, pad):
2143 # input: NHWC
2144 h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
2145 w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
2146
2147 if h <= 0 or w <= 0:
2148            # Invalid test parameters; mark the test as an expected failure
2149 h = 0
2150 w = 0
2151 ser.setExpectedFailure(True, 'Invalid combination of pooling parameters')
2152
2153 ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
2154 return ser.addOutput(ofm_shape, ifm.dtype, ifm.usage, ifm.dformat)
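        # Worked example with hypothetical parameters (illustration only):
        #   ifm NHWC [1, 32, 32, 8], kernel [2, 2], stride [2, 2], pad [0, 0, 0, 0]
        #   h = (32 + 0 + 0 + 2 - 2) // 2 = 16
        #   w = (32 + 0 + 0 + 2 - 2) // 2 = 16
        #   ofm_shape = [1, 16, 16, 8]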
2155
2156 @staticmethod
2157 def fullyConnectedOp(ser, input, filter):
2158 # input: N, IC
2159 # filter: OC, IC
2160 # output: N, OC
2161
2162 output_shape = [input.shape[0], filter.shape[0]]
2163
2164 if input.dtype == DType.AINT8 or input.dtype == DType.INT8:
2165 out_dtype = DType.INT32
2166 elif input.dtype == DType.INT16:
2167 out_dtype = DType.INT48
2168 elif input.dtype == DType.FLOAT:
2169 out_dtype = DType.FLOAT
2170 else:
2171 raise Exception('Unsupported input dtype: {}'.format(input.dtype))
2172
2173 return ser.addOutput(output_shape, out_dtype, input.usage, input.dformat)
2174
2175 @staticmethod
2176 def matmulOp(ser, a, b):
2177 # a: M, K
2178 # b: K, N
2179 # out: M, N
2180
2181 output_shape = [a.shape[0], b.shape[1]]
2182
2183
2184 if a.dtype == DType.AINT8:
2185 out_dtype = DType.INT32
2186 elif a.dtype == DType.INT16:
2187 out_dtype = DType.INT48
2188 elif a.dtype == DType.FLOAT:
2189 out_dtype = DType.FLOAT
2190 else:
2191            raise Exception('Unsupported input dtype for matmul: {}'.format(a.dtype))
2192
2193 return ser.addOutput(output_shape, out_dtype, a.usage, a.dformat)
2194
2195 @staticmethod
2196 def concatOp(ser, a, b, axis):
2197
2198 output_shape = a.shape.copy()
2199 output_shape[axis] = a.shape[axis] + b.shape[axis]
2200
2201 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2202
2203 @staticmethod
2204 def padOp(ser, a, padding):
2205
2206 output_shape = a.shape.copy()
2207
2208 for i in range(len(output_shape)):
2209 output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
2210
2211 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
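        # Illustrative example (hypothetical values): a.shape [2, 3] with
        # padding [[1, 1], [0, 2]] gives output_shape [1 + 1 + 2, 0 + 2 + 3] = [4, 5]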
2212
2213 @staticmethod
2214 def reshapeOp(ser, a, shape):
2215 output_shape = shape.copy()
2216
2217 totalElements = 1
2218 for i in a.shape:
2219 totalElements *= i
2220
2221 # If there are any -1 elements, figure out what that dimension must be
2222 totalOutputElements = 1
2223 for i in output_shape:
2224 if i != -1:
2225 totalOutputElements *= i
2226
2227 # And fill it in
2228 for i in range(len(output_shape)):
2229 if output_shape[i] == -1:
2230 output_shape[i] = totalElements // totalOutputElements
2231
2232 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
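        # Illustrative example (hypothetical values): a.shape [2, 3, 4] (24 elements)
        # reshaped to [4, -1]: the -1 dimension becomes 24 // 4 = 6, so the
        # output shape is [4, 6]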
2233
2234 @staticmethod
2235 def sliceOp(ser, a, begin, size):
2236
2237 output_shape = size.copy()
2238 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2239
2240 @staticmethod
2241 def tileOp(ser, a, multiples):
2242
2243 output_shape = a.shape.copy()
2244 assert(len(multiples) == len(output_shape))
2245
2246 for i in range(len(output_shape)):
2247 output_shape[i] = a.shape[i] * multiples[i]
2248
2249 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2250
2251 @staticmethod
2252 def transposeOp(ser, a, perms):
2253 output_shape = a.shape.copy()
2254 assert(len(perms) == len(output_shape))
2255
2256 for i in range(len(output_shape)):
2257 output_shape[i] = a.shape[perms[i]]
2258
2259 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
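        # Illustrative example (hypothetical values): a.shape [1, 16, 16, 8] with
        # perms [0, 3, 1, 2] gives an output shape of [1, 8, 16, 16]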
2260
2261 @staticmethod
2262 def gatherOp(ser, values, indicies, axis):
2263        # Output shape: the values shape with the axis dimension replaced by the number of indices
2264 output_shape = [*values.shape[0:axis], indicies.shape[0], *values.shape[axis+1:]]
2265
2266 return ser.addOutput(output_shape, values.dtype, indicies.usage, indicies.dformat)
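        # Illustrative example (hypothetical values): values.shape [4, 5, 6],
        # indicies.shape [3], axis 1 gives an output shape of [4, 3, 6]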
2267
2268 @staticmethod
2269 def tableOp(ser, input, table):
2270        # Same shape as the input; this generator always uses an INT32 output type
2271 return ser.addOutput(input.shape, DType.INT32, input.usage, input.dformat)
2272
2273 @staticmethod
2274 def resizeOp(ser, input, mode, stride, offset, shift, output_dims, output_dtype):
2275
2276 output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
2277
2278 if stride[0] <= 0 or stride[1] <= 0:
2279 ser.setExpectedFailure(True, 'Negative or zero stride')
2280
2281 return ser.addOutput(output_dims, output_dtype, input.usage, input.dformat)
2282
2283 @staticmethod
2284 def typeConversionOp(ser, val, out_dtype):
2285 return ser.addOutput(val.shape, out_dtype, val.usage, val.dformat)
2286
2287 @staticmethod
2288 def transposeConv2DOp(ser, ifm, output_shape):
2289 if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
2290 out_dtype = DType.INT32
2291 elif ifm.dtype == DType.INT16:
2292 out_dtype = DType.INT48
2293 elif ifm.dtype == DType.FLOAT:
2294 out_dtype = DType.FLOAT
2295 else:
2296 raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
2297
2298 if output_shape[1] <= 0 or output_shape[2] <= 0:
2299 ser.setExpectedFailure(True, 'Negative output shape')
2300
2301 return ser.addOutput(output_shape, out_dtype, ifm.usage, ifm.dformat)