#!/usr/bin/env python3

# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback

from enum import IntEnum, Enum, unique

import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Usage = tosa.Usage.Usage()
Format = tosa.Format.Format()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()

class TosaQuantGen:
    '''QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator definition'''
    def __init__(self):
        pass

    @staticmethod
    def needsQinfo(op, dtype):
        if dtype == DType.INT8:
            return True
        return False
56
57 @staticmethod
58 def qgUnary(testGen, op, dtype):
59 qinfo = ts.TosaSerializerQuantInfo()
60 if TosaQuantGen.needsQinfo(op, dtype):
61 qinfo.UnaryQuantInfo(testGen.randInt(), testGen.randInt())
62 else:
63 qinfo.UnaryQuantInfo(0, 0)
64 return qinfo
65
66 @staticmethod
67 def qgConv(testGen, op, dtype):
68 qinfo = ts.TosaSerializerQuantInfo()
69 if TosaQuantGen.needsQinfo(op, dtype):
70 qinfo.ConvQuantInfo(testGen.randInt(), testGen.randInt())
71 else:
72 qinfo.ConvQuantInfo(0, 0)
73 return qinfo
74
75 @staticmethod
76 def qgMatmul(testGen, op, dtype):
77 qinfo = ts.TosaSerializerQuantInfo()
78 if TosaQuantGen.needsQinfo(op, dtype):
79 qinfo.MatMulQuantInfo(testGen.randInt(), testGen.randInt())
80 else:
81 qinfo.MatMulQuantInfo(0, 0)
82 return qinfo
83
84 @staticmethod
85 def qgPad(testGen, op, dtype):
86 qinfo = ts.TosaSerializerQuantInfo()
87 if TosaQuantGen.needsQinfo(op, dtype):
88 qinfo.PadQuantInfo(testGen.randInt())
89 else:
90 qinfo.PadQuantInfo(0)
91 return qinfo
92
93 @staticmethod
94 def computeMultiplierAndShift(scaleFp, scale32):
95 # Derived from computeMultiplierAndShiftTosaScale32
96 # Provide a floating-point scaling factor and the scale32 parameter
97 # to compute the multiplier and shift
98
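        # Worked example (illustrative): with scale32=True and scaleFp = 0.125, frexp gives
        # m = 0.5 and exponent -2, so multiplier = round(0.5 * 2**31) = 1 << 30 and
        # shift = 2 + 31 = 33; (value * multiplier) >> shift then scales value by ~0.125.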
99 if scale32:
100 scaleBits = 31
101 else:
102 scaleBits = 15
103
104 m, shift = math.frexp(scaleFp)
105
106 if scaleFp < 0.0:
107 m = -m
108
109 multiplier = round(m * (1 << scaleBits))
110 assert(multiplier <= (1 << scaleBits))
111
112 if multiplier == (1 << scaleBits):
113 multiplier = multiplier // 2
114 shift = shift + 1
115
116 shift = (-shift) + scaleBits
117 #print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))
118
119 assert(multiplier <= (1 << scaleBits))
120 assert(shift >= 0 and shift <= 63)
121
122 return multiplier, shift
123
124
125class TosaTensorGen():
126 ''' Tensor generators create a shape list for the placeholder and const tensor
127 data operands for the operator. The actual random data is generated separately for each test.'''
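    # For example (illustrative): an op declared with 'operands': (1, 1) at rank 3 might get
    # shape_list = [[2, 5, 7], [2, 5, 7]] from tgBasic -- one shape per operand.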
128 def __init__(self):
129 pass
130
131 @staticmethod
132 def tgBasic(testGen, opName, rank):
133 pl, const = opName['operands']
134 shape = testGen.makeShape(rank)
135
136 shape_list = []
137 for i in range(pl + const):
138 shape_list.append(shape.copy())
139
140 return shape_list
141
142 @staticmethod
143 def tgNHWC(testGen, opName, rank):
144 pl, const = opName['operands']
145
146 assert(rank == 4)
147
148 shape = testGen.makeShape(rank)
149
150 # Constrict the batch size?
151 if testGen.args.max_batch_size:
152 shape[0] = (shape[0] % testGen.args.max_batch_size) + 1
153
154 shape_list = []
155 for i in range(pl + const):
156 shape_list.append(shape.copy())
157
158 return shape_list
159
160 @staticmethod
    def tgScatter(testGen, opName, rank):
162 pl, const = opName['operands']
163
164 assert(pl == 2)
165 assert(const == 0)
166 assert(rank == 3)
167
168 values_in_shape = testGen.makeShape(rank)
169
170 # Constrict the batch size?
171 if testGen.args.max_batch_size:
172 values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1
173
174 W = testGen.randInt(testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1])
175 input_shape = [values_in_shape[0], W, values_in_shape[2]]
176
177 shape_list = []
178 shape_list.append(values_in_shape.copy())
179 shape_list.append(input_shape.copy())
180
181 return shape_list
182
183 @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
185 shape = testGen.makeShape(rank)
186
187 pl, const = op['operands']
188
189 shape_list = []
190
191 # Choose one of the inputs to broadcast
192 bcast_idx = testGen.randInt(0, pl + const)
193 for i in range(pl + const):
194 shape_bcast = shape.copy()
195
            # If this is the chosen input, pick a random dimension to set to 1 for broadcasting
197 if i == bcast_idx:
198 fuzz_idx = testGen.randInt(0, rank)
199 shape_bcast[fuzz_idx] = 1
200
201 shape_list.append(shape_bcast)
202
203 return shape_list
204
205 @staticmethod
206 def tgConv2D(testGen, op, rank):
207 pl, const = op['operands']
208
209 assert(rank == 4)
210
211 # IFM dimensions are NHWC
212 ifm_shape = testGen.makeShape(rank)
213
214 # Constrict the batch size?
215 if testGen.args.max_batch_size:
216 ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
217
218 # Get the filter height/width from the operator parameters
219 filter_hw = op['filter']
220
221 # Generate a random OFM depth
222 ofm_depth = testGen.makeShape(1)[0]
223
224 # The filter dimensions are OHWI
225 filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])
226
227 # The bias is OC
228 bias_shape = np.asarray([ofm_depth])
229
230 return [ifm_shape, filter_shape, bias_shape]
231
232 @staticmethod
233 def tgTransposeConv2D(testGen, op, rank):
234 pl, const = op['operands']
235
236 assert(rank == 4)
237
238 # IFM dimensions are NHWC
239 ifm_shape = testGen.makeShape(rank)
240
241 # Constrict the batch size?
242 if testGen.args.max_batch_size:
243 ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
244
245 # Get the filter height/width from the operator parameters
246 filter_hw = op['filter']
247
248 # Generate a random OFM depth
249 ofm_depth = testGen.makeShape(1)[0]
250
251 # The filter dimensions are OHWI
252 filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])
253
254 return [ifm_shape, filter_shape]
255
256 @staticmethod
257 def tgDepthwiseConv2D(testGen, op, rank):
258 pl, const = op['operands']
259
260 assert(rank == 4)
261 assert(pl == 1 and const == 2)
262
263 # IFM dimensions are NHWC
264 ifm_shape = testGen.makeShape(rank)
265
266 # Constrict the batch size?
267 if testGen.args.max_batch_size:
268 ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
269
270 # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
272 filter_hw = op['filter']
273
274 # Generate a random OFM depth, but don't let it get too big because
275 # the output depth is M * C
276 filter_m = (testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)) + 1
277
278 # The filter dimensions are HWCM
279 filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])
280
281 # The bias is M * C
282 bias_shape = np.asarray([ifm_shape[3] * filter_m])
283
284 return [ifm_shape, filter_shape, bias_shape]
285
286 @staticmethod
287 def tgFullyConnected(testGen, op, rank):
288 pl, const = op['operands']
289
290 assert(rank == 2)
291 assert(pl == 2 and const == 0)
292
293 input_shape = testGen.makeShape(rank)
294 filter_oc = testGen.makeShape(1)[0]
295 filter_shape = np.asarray([filter_oc, input_shape[1]])
296
297 bias_shape = np.asarray([filter_oc])
298
299 return [input_shape, filter_shape, bias_shape]
300
301 @staticmethod
302 def tgMatmul(testGen, op, rank):
303 pl, const = op['operands']
304
305 assert(rank == 2)
306 assert(pl == 2 and const == 0)
307
308 a_shape = testGen.makeShape(rank)
309 b_oc = testGen.makeShape(1)[0]
310 b_shape = np.asarray([a_shape[1], b_oc])
311
312 return [a_shape, b_shape]
313
314class TosaArgGen:
315 '''Argument generators create exhaustive or random lists of attributes for operators that take
316 attributes or other parameters. The return value is a list of (descriptive_name, [arglist])
317 tuples where the descriptive_name is appended to the test name and the arglist is expanded
318 as arguments to the operator build function.'''
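    # For example (illustrative): agAxis on a rank-2 shape returns
    # [('axis_0', [0]), ('axis_1', [1])] -- a name suffix plus the build-function arguments.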
319 def __init__(self):
320 pass
321
322 @staticmethod
323 def agNone(testGen, opName, shapeList, dtype):
324 '''A trivial argument generator for operators that don't take any
325 non-tensor arguments'''
326 return [('', [])]
327
328 @staticmethod
329 def agAxis(testGen, opName, shapeList, dtype):
330 '''Build the axis argument for operators that take a single axis'''
331 axes = []
332
333 shape = shapeList[0]
334
335 for a in range(0, len(shape)):
336 axes.append(('axis_{}'.format(a), [a]))
337 return axes
338
339 @staticmethod
340 def agConv2D(testGen, opName, shapeList, dtype):
341 arg_list = []
342
343 ifm_shape = shapeList[0]
344 filter_shape = shapeList[1]
345
346 # Must be rank 4
347 assert(len(ifm_shape) == 4)
348 assert(len(filter_shape) == 4)
349
350 maxStride = testGen.args.max_conv_stride
351 maxPadding = testGen.args.max_conv_padding + 1
352 maxDilation = testGen.args.max_conv_dilation
353
354 # Strides, padding, dilations
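        # e.g. (illustrative) with maxStride = 2, stride indices 0..3 decode to strides
        # (1,1), (1,2), (2,1), (2,2); padding and dilation are decoded the same way below.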
355 for stride in range(0, maxStride ** 2):
356 for padding in range(0, (maxPadding) ** 4):
357 for dilation in range(0, maxDilation ** 2):
358
359 s = [stride // maxStride + 1,
360 stride % maxStride + 1]
361 p = [(padding // (maxPadding * 4)) % maxPadding,
362 (padding // (maxPadding * 2)) % maxPadding,
363 (padding // (maxPadding * 1)) % maxPadding,
364 padding % maxPadding]
365 d = [ dilation // maxDilation + 1,
366 dilation % maxDilation + 1]
367
368 # 4 padding parameters for regular conv2d
369 arg_list.append(('st{}{}_pad{}{}{}{}_dilat{}{}'.format(s[0], s[1],
370 p[0], p[1], p[2], p[3],
371 d[0], d[1]),
372 [ s, p, d ]))
373 return arg_list
374
375 @staticmethod
376 def agTransposeConv2D(testGen, opName, shapeList, dtype):
377 arg_list = []
378
379 ifm_shape = shapeList[0]
380 filter_shape = shapeList[1]
381
382 # Must be rank 4
383 assert(len(ifm_shape) == 4)
384 assert(len(filter_shape) == 4)
385
386 maxStride = testGen.args.max_conv_stride
387 maxPadding = testGen.args.max_conv_padding + 1
388 maxDilation = testGen.args.max_conv_dilation
389
390 # Strides, padding, dilations
391 for stride in range(0, maxStride ** 2):
392 for out_padding in range(0, (maxPadding) ** 2):
393 for dilation in range(0, maxDilation ** 2):
394
395 s = [stride // maxStride + 1,
396 stride % maxStride + 1]
397 p = [(out_padding // (maxPadding * 1)) % maxPadding,
398 out_padding % maxPadding]
399 d = [ dilation // maxDilation + 1,
400 dilation % maxDilation + 1]
401
402 oh = (ifm_shape[1] - filter_shape[1] - (filter_shape[1] - 1) * (d[0] - 1) + \
403 2 * p[0]) // s[0] + 1
404
405 ow = (ifm_shape[2] - filter_shape[2] - (filter_shape[2] - 1) * (d[1] - 1) + \
406 2 * p[1]) // s[1] + 1
407
408 # Output shape
409 os = [ ifm_shape[0], oh, ow, filter_shape[0] ]
410
411 arg_list.append(('st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}'.format(s[0], s[1],
412 p[0], p[1],
413 d[0], d[1],
414 os[0], os[1], os[2], os[3]),
415 [ s, p, d, os ]))
416
417 return arg_list
418
419 @staticmethod
420 def agPad(testGen, opName, shapeList, dtype):
421 arg_list = []
422 rank = len(shapeList[0])
423
424 # Exhaustively test combinations of 0/1 padding on each side of each dimension
425 # This process might need some revision for >1 padding, but use rank**2 as a bitmask
426 # for now
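        # e.g. (illustrative) for rank 2, v = 0b0011 sets the first two flat entries,
        # giving paddings = [[1, 1], [0, 0]] after the reshape below.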
427 for v in range(rank ** 2):
428
            # Create a flat array of padding values
430 paddings = np.zeros((rank * 2), dtype=np.int32)
431
432 # Fill in the 1's
433 for r in (range(rank * 2)):
434 if (v >> r) & 1:
435 paddings[r] = 1
436
437 # Reshape back to a 2D array
438 paddings = paddings.reshape((rank, 2))
439
440 arg_list.append(('pad{0:b}'.format(v), [ paddings ]))
441
442 return arg_list
443
444 @staticmethod
445 def agPooling(testGen, opName, shapeList, dtype):
446 arg_list = []
447
448 shape = shapeList[0]
449 assert(len(shape) == 4)
450
451 maxStride = testGen.args.max_pooling_stride
452 maxKernel = testGen.args.max_pooling_kernel
453 maxPadding = testGen.args.max_pooling_padding + 1
454
455 for kernel in range(0, maxKernel ** 2):
456 for stride in range(0, maxStride ** 2):
457 for padding in range(0, maxPadding ** 4):
458 s = [stride // maxStride + 1,
459 stride % maxStride + 1]
460 k = [(kernel // maxKernel) + 2,
461 (kernel % maxKernel) + 2]
462 p = [(padding // (maxPadding * 4)) % maxPadding,
463 (padding // (maxPadding * 2)) % maxPadding,
464 (padding // (maxPadding * 1)) % maxPadding,
465 padding % maxPadding]
466
467 arg_list.append(('st{}{}_kern{}{}_pad{}{}{}{}'.format(s[0], s[1],
468 k[0], k[1],
469 p[0], p[1], p[2], p[3]),
470 [k, s, p]))
471 return arg_list
472
473 @staticmethod
474 def agCast(testGen, opName, shapeList, inDtype):
475 arg_list = []
476
477 # Enumerate the output types here
478 if inDtype == DType.INT8:
479 dtypeList = [ DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT ]
480 elif inDtype == DType.INT16:
481 dtypeList = [ DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT ]
482 elif inDtype == DType.INT32:
483 dtypeList = [ DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT ]
484 elif inDtype == DType.BOOL:
485 dtypeList = [ DType.INT8, DType.INT16, DType.INT32 ]
486 elif inDtype == DType.FLOAT:
487 dtypeList = [ DType.INT8, DType.INT16, DType.INT32 ]
488 else:
489 raise Exception('Unexpected input dtype: {}'.format(inDtype))
490
491 for dtype in dtypeList:
492 arg_list.append(('out{}'.format(DTypeNames[dtype]), [dtype]))
493
494 return arg_list
495
496 @staticmethod
497 def agRescale(testGen, opName, shapeList, inDtype):
498 arg_list = []
499
500 # Enumerate the output types here
        for dtype in [ DType.INT8, DType.INT16, DType.INT32 ]:
            for scale32 in [ False, True ]:
503 for double_round in [ False, True ]:
504 for per_channel in [ False, True ]:
505
506 if inDtype == DType.INT48 and scale32:
507 # Illegal condition. Must be scale32=False
508 continue
509
510 arg_list.append(('out{}_sc{}_dr{}_pc{}'.format(DTypeNames[dtype], int(scale32), int(double_round), int(per_channel)),
511 [dtype, scale32, double_round, per_channel]))
512
513 return arg_list
514
    @staticmethod
516 def agMul(testGen, opName, shapeList, dtype):
517 arg_list = []
518
519 if dtype is DType.INT32:
520 for p in range(testGen.args.num_rand_permutations):
521
522 shift = testGen.randInt(0, 32)
523
524 arg_list.append(('perm{}_shift{}'.format(p, shift), [shift]))
525 else:
526 arg_list.append(('shift0', [0]))
527
528 return arg_list
529
530 @staticmethod
531 def agArithmeticRightShift(testGen, opName, shapeList, dtype):
532 arg_list = []
533
534 arg_list.append(('roundTrue', [True]))
535 arg_list.append(('roundFalse', [False]))
536
537 return arg_list
538
    # Helper function for reshape. Gets some factors of a larger number.
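    # e.g. (illustrative) getFactors(12) returns [1, 2]: factors up to sqrt(val), excluding val itself.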
540 @staticmethod
541 def getFactors(val, start=1):
542 factors = []
543
544 for i in range(start, int(np.sqrt(val))):
545 if (val % i) == 0:
546 factors.append(i)
547
548 return factors
549
550 @staticmethod
551 def agReshape(testGen, opName, shapeList, dtype):
552 arg_list = []
553
554 origShape = shapeList[0]
555
556 totalElements = 1
557 for s in origShape:
558 totalElements *= s
559
560 # This code is NOT fast. Fortunately, the numbers are fairly small.
561 factors = TosaArgGen.getFactors(totalElements)
562
563 for p in range(testGen.args.num_rand_permutations):
564 newRank = testGen.randInt(1, 6)
565 newShape = []
566 if (len(factors) < newRank):
567 continue
568
569 remainingElements = totalElements
570 shuffledFactors = testGen.rng.permutation(factors)
571 for i in range(newRank):
572 # pick rank-1 factors
573 newShape.append(shuffledFactors[0])
574 remainingElements = remainingElements // shuffledFactors[0]
575 shuffledFactors = testGen.rng.permutation(TosaArgGen.getFactors(remainingElements))
576 newShape.append(remainingElements)
577
578 # Toss in a -1 sometimes
579 minusOne = testGen.randInt(0, newRank * 4)
580 if minusOne < newRank:
581 newShape[minusOne] = -1
582
583 arg_list.append(('perm{}_rank{}'.format(p, newRank), [newShape]))
584
585 return arg_list
586
587
588 @staticmethod
589 def agTranspose(testGen, opName, shapeList, dtype):
590 arg_list = []
591
592 ifm_shape = shapeList[0]
593
594 perms = range(len(ifm_shape))
595 for p in range(testGen.args.num_rand_permutations):
596 perms = np.int32(testGen.rng.permutation(perms)).tolist()
597
598 # Avoid duplicates
599 found = False
600 for name, other_perm in arg_list:
601 if other_perm[0] == perms:
602 found = True
603 break
604
605 if not found:
606 arg_list.append(('perm{}'.format(p), [perms]))
607
608 return arg_list
609
610 @staticmethod
611 def agSlice(testGen, opName, shapeList, dtype):
612 arg_list = []
613
614 ifm_shape = shapeList[0]
615 rank = len(ifm_shape)
616
617 for p in range(testGen.args.num_rand_permutations):
618 begin = []
619 size = []
620
621 valid=True
622
623 for i in range(rank):
624 if ifm_shape[i] > 1:
625 begin.append(testGen.randInt(0, ifm_shape[i]))
626 size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))
627
628 # Invalid slice size?
629 if size[i] == 0:
630 valid = False
631 else:
632 begin.append(0)
633 size.append(1)
634
635 if valid:
636 arg_list.append(('perm{}'.format(p), [begin, size]))
637 return arg_list
638
639 @staticmethod
640 def agTile(testGen, opName, shapeList, dtype):
641 arg_list = []
642
643 ifm_shape = shapeList[0]
644 rank = len(ifm_shape)
645
646 for p in range(testGen.args.num_rand_permutations):
647
648 # Pick a few random, but small multiple values
649 # because otherwise this has a tendency to generate
650 # enormous tensors
651 multiples = []
652 for i in range(rank):
653 multiples.append(testGen.randInt(1, 4))
654
655 arg_list.append(('perm{}'.format(p), [multiples]))
656
657 return arg_list
658
659 @staticmethod
660 def agResize(testGen, opName, shapeList, dtype):
661 arg_list = []
662
663 ifm_shape = shapeList[0]
664
665 for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:
666
667 # Exclude illegal {mode, type} configurations. Pick legal output types
668 if m == ResizeMode.NEAREST and dtype == DType.INT8:
669 outputDTypeList = [ DType.INT32 ]
670 elif m == ResizeMode.NEAREST and dtype == DType.INT16:
671 outputDTypeList = [ DType.INT16 ]
672 elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
673 outputDTypeList = [ DType.INT8 ]
674 elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
675 outputDTypeList = [ DType.INT48 ]
            elif dtype == DType.FLOAT:
                outputDTypeList = [ DType.FLOAT ]
            else:
679 continue
680
681 for outputDType in outputDTypeList:
682 for perm in range(testGen.args.num_rand_permutations):
683
684 # Randomly generate legal output dimensions and shift
685 # and then compute the stride and offset based on them
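                    # e.g. (illustrative): input height 4 and output height 8 give fp_stride_y = 0.5;
                    # with shift = 11 below, that quantizes to stride_y = round(0.5 * 2048) = 1024.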
                    output_dims = [ testGen.randInt(1), testGen.randInt(1) ]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [ fp_stride_y, fp_stride_x]
                        offset_fp = [ fp_offset_y, fp_offset_x]
                        arg_list.append(('mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}'.format(m, output_dims[0], output_dims[1],
                                                                                                           testGen.typeStr(outputDType), stride_fp[0], stride_fp[1],
                                                                                                           offset_fp[0], offset_fp[1]),
                                         [m, stride, offset, shift, stride_fp, offset_fp, output_dims, dtype, outputDType]))
                    else:
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (stride_y >= 32768 or stride_x >= 32768 or offset_y >= 32768 or offset_x >= 32768 or offset_y < -32768 or offset_x < -32768):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [ stride_y, stride_x]
                        offset = [ offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(('mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}'.format(m, shift, output_dims[0], output_dims[1],
                                                                                                   testGen.typeStr(outputDType), stride[0], stride[1],
                                                                                                   offset[0], offset[1]),
                                         [m, stride, offset, shift, stride_fp, offset_fp, output_dims, dtype, outputDType]))

734 return arg_list
735
    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
737 # CondIf generates the condition values here.
738 # Convert to tensors in the build function, along with the
739 # then and else blocks
740 arg_list = []
741
742 for c in [False, True]:
743 arg_list.append(('cond{}'.format(int(c)), [ c ]))
744
745 return arg_list
746
    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
748 # While loop: 0 iterations, 1, more than 1
749 arg_list = []
750
751 for iter in [0, 1, 4]:
752 arg_list.append(('iter{}'.format(iter), [ iter ]))
753
754 return arg_list
755
756class TosaTestGen:
757 def __init__(self, args):
758 self.args = args
759 self.basePath = args.output_dir
760 self.random_seed = args.random_seed
761 self.ser = None
762 self.rng = np.random.default_rng(self.random_seed)
763 self.createDynamicOpLists()
764 self.initOpListDefaults()
765 self.quantGen = TosaQuantGen()
766 # Force makeShape to do a specific starting shape
767 self.targetted_shape = None
768
769 def createSerializer(self, opName, testPath):
770 self.testPath = os.path.join(opName, testPath)
771
772 fullPath = os.path.join(self.basePath, self.testPath)
773 os.makedirs(fullPath, exist_ok=True)
774 self.ser = ts.TosaSerializer(fullPath)
775
776 def getSerializer(self):
777 return self.ser
778
779 def serialize(self, testName):
780 with open(os.path.join(self.basePath, self.testPath, '{}.tosa'.format(testName)), 'wb') as fd:
781 fd.write(self.ser.serialize())
782
783 with open(os.path.join(self.basePath, self.testPath, 'desc.json'), 'w') as fd:
784 fd.write(self.ser.writeJson('{}.tosa'.format(testName)))
785
    def getRandTensor(self, shape, dtype):
        RAND_SHIFT_FACTOR = 0.5
        RAND_SCALE_FACTOR = 4.0

        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-127, high=128, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape))
        elif dtype == DType.INT48:
            return np.int64(self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape))
        elif dtype == DType.FLOAT:
            return np.float32((self.rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR)
        else:
            raise Exception('Unrecognized Dtype: {}'.format(dtype))
807
808 def buildPlaceholderTensors(self, shape_list, dtype):
809 placeholders = []
810
811 for shape in shape_list:
812 arr = self.getRandTensor(shape, dtype)
813 placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))
814
815 return placeholders
816
817 def buildConstTensors(self, shape_list, dtype):
818 consts = []
819
820 for shape in shape_list:
821 arr = self.getRandTensor(shape, dtype)
822 consts.append(self.ser.addConst(shape, dtype, Usage.ACTIVATION, [], arr))
823
824 return consts
825
826 def makeShape(self, rank):
827 if self.targetted_shape:
828 return np.int32(self.targetted_shape)
829 return np.int32(self.rng.integers(low=self.args.tensor_shape_range[0],
830 high=self.args.tensor_shape_range[1],
831 size=rank))
832
833 def setTargetShape(self, shape):
834 self.targetted_shape = shape
835
836 def randInt(self, low=0, high=256):
837 return np.int32(self.rng.integers(low=low, high=high, size=1))[0]
838
839 def getRandNumberDType(self, dtype):
840 if dtype == DType.FLOAT:
841 return self.rng.random()
842 elif dtype == DType.BOOL:
843 return self.rng.choice([False, True])
844 elif dtype == DType.INT4:
845 low, high = (-7, 8)
        elif dtype == DType.INT8:
847 low, high = (-127, 128)
848 elif dtype == DType.INT16:
849 low, high = (-32768, 32768)
850 elif dtype == DType.INT32:
851 low, high = (-(1<<31), (1<<31))
852 elif dtype == DType.INT48:
853 low, high = (-(1<<47), (1<<47))
854 # Special size
855 return np.int64(self.rng.integers(low, high, size=1))[0]
856 else:
857 raise Exception('Unknown dtype: {}'.format(dtype))
858
859 return np.int32(self.rng.integers(low, high, size=1))[0]
860
861 def shapeStr(self, shape):
862
863 sStr = []
864 # Convert to strings
865 for i in shape:
866 sStr.append(str(i))
867
868 return 'x'.join(sStr)
869
870 def typeStr(self, t):
871 if t == DType.BOOL:
872 return 'b'
        elif t == DType.INT4:
            return 'i4'
        elif t == DType.INT8:
            return 'i8'
        elif t == DType.UINT8:
            return 'u8'
        elif t == DType.INT16:
880 return 'i16'
881 elif t == DType.INT32:
882 return 'i32'
883 elif t == DType.INT48:
884 return 'i48'
885 elif t == DType.FLOAT:
886 return 'float'
887 else:
888 raise Exception('Unknown dtype, cannot convert to string: {}'.format(t))
889
    def typeWidth(self, t):
        ''' Get the datatype width for integer types'''
        if t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception('Unknown dtype, cannot determine width: {}'.format(t))
906
907 # Argument generators
908 # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
909 # Where the string descriptor is used to generate the test name and
910 # The build_fcn_arg_list is expanded and passed to the operator test
911 # build function
912
913
914 def build_unary(self, op, a, qinfo = None):
915 result_tens = OutputShaper.unaryOp(self.ser, a)
916 self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
917 return result_tens
918
919 def build_binary_broadcast(self, op, a, b):
920 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
921 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
922 return result_tens
923
924 def build_binary_nonbroadcast(self, op, a, b):
925 result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
926 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
927 return result_tens
928
    def build_arithmetic_right_shift(self, op, a, b, round):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        attr = ts.TosaSerializerAttribute()
        attr.ArithmeticRightShiftAttribute(round)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_mul(self, op, a, b, shift):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        attr = ts.TosaSerializerAttribute()
        attr.MulAttribute(shift)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens
951
952 def build_table(self, op, a):
953 # Constant size, random values
954 table_arr = self.getRandTensor([513], DType.INT16)
955 table_tens = self.ser.addConst(table_arr.shape, DType.INT16, Usage.INDEX, [], table_arr)
956
957 result_tens = OutputShaper.tableOp(self.ser, a, table_tens)
958 self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
959
960 return result_tens
961
962 def build_select(self, op, cond, a, b):
963
964 # Replace the cond tensor with a boolean tensor since it probably
965 # has the wrong dtype
966 t = self.buildPlaceholderTensors([cond.shape], DType.BOOL)
967 cond = t[0]
968
969 result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
970 self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
971
972 return result_tens
973
974 def build_comparison(self, op, a, b):
975 result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
976 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
977 return result_tens
978
979 def build_argmax(self, op, a, axis):
980 result_tens = OutputShaper.argmaxOp(self.ser, a, axis)
981
982 attr = ts.TosaSerializerAttribute()
983 attr.AxisAttribute(axis)
984
985 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
986 return result_tens
987
988 def build_pool2d(self, op, input, kernel, stride, pad, qinfo = None):
989 result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
990
991 attr = ts.TosaSerializerAttribute()
992 attr.Pool2dAttribute(kernel, stride, pad)
993 input.addFormat(Format.NHWC)
994
995 self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
996 return result_tens
997
998 def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
999 assert(len(padding) == 4)
1000 result_tens = OutputShaper.conv2dOp(self.ser, ifm, filter, strides, padding, dilations)
1001
1002 attr = ts.TosaSerializerAttribute()
1003 attr.Conv2dAttribute(padding, strides, dilations)
1004
1005 ifm.addFormat(Format.NHWC)
1006 # Update the filter ordering
1007 filter.addUsage(Usage.WEIGHT)
1008 filter.addFormat(Format.OHWI)
1009
1010 self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
1011 return result_tens
1012
1013 def build_transpose_conv2d(self, op, ifm, filter, stride, outpad, dilation, output_shape, qinfo):
1014 assert(len(outpad) == 2)
1015 result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
1016
1017 attr = ts.TosaSerializerAttribute()
1018 attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)
1019
1020 ifm.addFormat(Format.NHWC)
1021 # Update the filter ordering
1022 filter.addUsage(Usage.WEIGHT)
1023 filter.addFormat(Format.OHWI)
1024
1025 # Create bias here since the acc_t depends on (but isn't the same as) the input dtype
1026 # The bias is OC
        if ifm.dtype == DType.INT8:
            bias_type = DType.INT32
1029 elif ifm.dtype == DType.INT16:
1030 bias_type = DType.INT48
1031 elif ifm.dtype == DType.FLOAT:
1032 bias_type = DType.FLOAT
1033 else:
1034 raise Exception('Unsupported dtype for transpose_conv2d: {}'.format(ifm.dtype))
1035
1036 bias_arr = self.getRandTensor([filter.shape[0]], bias_type)
1037 bias_tens = self.ser.addConst([filter.shape[0]], bias_type, [], [], bias_arr)
1038
1039 self.ser.addOperator(op, [ifm.name, filter.name, bias_tens.name], [result_tens.name], attr, qinfo)
1040 return result_tens
1041
1042 def build_depthwise_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
1043 result_tens = OutputShaper.depthwiseConv2dOp(self.ser, ifm, filter, strides, padding, dilations)
1044
1045 attr = ts.TosaSerializerAttribute()
1046 attr.Conv2dAttribute(padding, strides, dilations)
1047
1048 ifm.addFormat(Format.NHWC)
1049 filter.addUsage(Usage.WEIGHT)
1050 filter.addFormat(Format.HWIM)
1051
1052 self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
1053 return result_tens
1054
1055 def build_fully_connected(self, op, ifm, filter, bias, qinfo):
1056 result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
1057
1058 filter.addUsage(Usage.WEIGHT)
1059 self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo)
1060 return result_tens
1061
1062 def build_matmul(self, op, a, b, qinfo):
1063 result_tens = OutputShaper.matmulOp(self.ser, a, b)
1064 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
1065 return result_tens
1066
1067 def build_reduce(self, op, a, axis):
1068 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1069
1070 attr = ts.TosaSerializerAttribute()
1071 attr.AxisAttribute(axis)
1072
1073 self.ser.addOperator(op, [a.name], result_tens.name, attr)
1074 return result_tens
1075
1076 def build_clamp(self, op, a):
1077 result_tens = OutputShaper.unaryOp(self.ser, a)
1078
1079 attr = ts.TosaSerializerAttribute()
1080
1081 # Get two random ints
1082 v = [self.randInt(), self.randInt()]
1083
1084 if a.dtype == DType.FLOAT:
1085 attr.ClampAttribute(0, 0, min(v), max(v))
1086 else:
1087 attr.ClampAttribute(min(v), max(v), 0, 0)
1088
1089 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1090 return result_tens
1091
1092 def build_leaky_relu(self, op, a):
1093 result_tens = OutputShaper.unaryOp(self.ser, a)
1094 attr = ts.TosaSerializerAttribute()
1095
1096 attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))
1097
1098 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1099 return result_tens
1100
1101 # Needs an additional type/input
1102 def build_prelu(self, op, a):
1103 result_tens = OutputShaper.unaryOp(self.ser, a)
1104
1105 self.ser.addOperator(op, [a.name], [result_tens.name])
1106 return result_tens
1107
1108 def build_relun(self, op, a):
1109 result_tens = OutputShaper.unaryOp(self.ser, a)
1110
1111 attr = ts.TosaSerializerAttribute()
1112
1113 if a.dtype == DType.FLOAT:
1114 attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
1115 else:
1116 attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)
1117
1118 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1119 return result_tens
1120
1121 def build_sigmoid(self, op, a):
1122 result_tens = OutputShaper.unaryOp(self.ser, a)
1123 self.ser.addOperator(op, [a.name], [result_tens.name])
1124 return result_tens
1125
1126 def build_tanh(self, op, a):
1127 result_tens = OutputShaper.unaryOp(self.ser, a)
1128 self.ser.addOperator(op, [a.name], [result_tens.name])
1129 return result_tens
1130
1131 def build_concat(self, op, a, b, axis):
1132 result_tens = OutputShaper.concatOp(self.ser, a, b, axis)
1133
1134 attr = ts.TosaSerializerAttribute()
1135 attr.AxisAttribute(axis)
1136
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens
1138
1139 def build_pad(self, op, a, padding, qinfo):
1140 result_tens = OutputShaper.padOp(self.ser, a, padding)
1141
1142 # Need to turn the padding array into a TOSA tensor here.
1143 # This is one of the few tensor operands that does not get
1144 # randomly generated
1145 padding_tens = self.ser.addConst(padding.shape, DType.INT32, [], [], padding)
1146
        self.ser.addOperator(op, [a.name, padding_tens.name], [result_tens.name], None, qinfo)
        return result_tens
1148
1149 def build_reshape(self, op, a, newShape):
1150 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1151
1152 attr = ts.TosaSerializerAttribute()
1153 attr.ReshapeAttribute(newShape)
1154
1155 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1156 return result_tens
1157
1158 def build_reverse(self, op, a, axis):
1159 result_tens = OutputShaper.unaryOp(self.ser, a)
1160
1161 attr = ts.TosaSerializerAttribute()
1162 attr.AxisAttribute(axis)
1163
1164 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1165 return result_tens
1166
1167 def build_transpose(self, op, a, perms):
1168 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1169
1170 perms_tens = self.ser.addConst([len(perms)], DType.INT32, Usage.ACTIVATION, [], np.int32(perms))
1171
1172 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1173 return result_tens
1174
1175 def build_slice(self, op, a, begin, size):
1176 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1177
1178 attr = ts.TosaSerializerAttribute()
1179 attr.SliceAttribute(begin, size)
1180
1181 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1182 return result_tens
1183
1184 def build_tile(self, op, a, multiples):
1185 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1186
1187 attr = ts.TosaSerializerAttribute()
1188 attr.TileAttribute(multiples)
1189
1190 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1191 return result_tens
1192
1193
    def build_gather(self, op, values):

        # Create a new indicies tensor
        # here with data that doesn't exceed the dimensions of the values tensor

        K = values.shape[1]  # K
        W = self.randInt(self.args.tensor_shape_range[0], self.args.tensor_shape_range[1])  # W
        indicies_arr = np.int32(self.rng.integers(low=0, high=K, size=[values.shape[0], W]))  # (N, W)
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, Usage.INDEX, [], indicies_arr)

        result_tens = OutputShaper.gatherOp(self.ser, values, indicies)

        self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])

        return result_tens

    def build_scatter(self, op, values_in, input):

        # Create a new indicies tensor
        # here with data that doesn't exceed the dimensions of the values_in tensor

        K = values_in.shape[1]  # K
        W = input.shape[1]  # W
        indicies_arr = np.int32(self.rng.integers(low=0, high=K, size=[values_in.shape[0], W]))  # (N, W)
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, Usage.INDEX, [], indicies_arr)

        result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)

        self.ser.addOperator(op, [values_in.name, indicies.name, input.name], [result_tens.name])

        return result_tens
1225
    def build_resize(self, op, input, mode, stride, offset, shift, stride_fp, offset_fp, output_dims, input_dtype, output_dtype):
        result_tens = OutputShaper.resizeOp(self.ser, input, mode, stride, offset, shift, stride_fp, offset_fp, output_dims, input_dtype, output_dtype)

        attr = ts.TosaSerializerAttribute()

        attr.ResizeAttribute(output_dims, stride, offset, shift, stride_fp, offset_fp, mode)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens
1235
1236 def build_identityn(self, op, val, val2):
1237
1238 result_tens = OutputShaper.unaryOp(self.ser, val)
1239 result_tens2 = OutputShaper.unaryOp(self.ser, val2)
1240 self.ser.addOperator(op, [val.name, val2.name], [result_tens.name, result_tens2.name])
1241 return result_tens
1242
1243 def build_placeholder(self, op, val):
1244 # Add an identity op to avoid warning in the reference model
1245 return self.build_unary(Op.IDENTITY, val)
1246
1247 # Type Conversion
1248 def build_cast(self, op, val, out_dtype):
1249 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1250 self.ser.addOperator(op, [val.name], [result_tens.name])
1251 return result_tens
1252
1253 def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
1254 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1255
1256 if per_channel:
1257 nc = val.shape[-1]
1258 else:
1259 nc = 1
1260
1261 in_type_width = self.typeWidth(val.dtype)
1262 out_type_width = self.typeWidth(out_dtype)
1263
        if val.dtype == DType.INT8:
            input_zp = self.randInt()
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.INT8:
            output_zp = self.randInt()
            out_type_width = out_type_width + 1
        else:
            output_zp = 0
1275
        # Calculate scale based on:
        # scale = a * (2^output_width) / (2^input_width)
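        # e.g. (illustrative): for an INT8 -> INT16 rescale the widths above become 9 and 16,
        # so scale_arr is roughly a * 2**16 / 2**9 = 128 * a before clipping.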
1278
1279 a = np.float32(self.rng.random(size=[nc]))
1280 scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
1281
        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
1289
1290 #print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
1291
1292 multiplier_arr = np.int32(np.zeros(shape=[nc]))
1293 shift_arr = np.int32(np.zeros(shape=[nc]))
1294
1295 for i in range(nc):
1296 multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(scale_arr[i], scale32)
            if shift_arr[i] < 2 or shift_arr[i] > 62:
                self.ser.setExpectedFailure(True, 'OpRescale: invalid shift value')

1300 #print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
1301
1302 attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(input_zp,
                              output_zp,
                              multiplier_arr,
                              shift_arr,
                              scale32,
                              double_round,
                              per_channel)
1311
1312 self.ser.addOperator(op, [val.name], [result_tens.name], attr)
1313 return result_tens
1314
1315 def build_cond_if_const(self, op, then_tens, else_tens, cond):
1316 # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition. Build Then/Else blocks
1318 # and fill them with const nodes for the body.
1319
1320 # Condition tensor
1321 cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])
1322
1323 # Make then/else tensors
1324 out_shape = then_tens.shape
1325 then_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
1326 else_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
1327
1328 # And the result tensor based on any of the outputs
1329 result_tens = self.ser.addOutput(out_shape, DType.INT32, Usage.ACTIVATION, [])
1330
1331 # Create the attribute with the names of the then/else blocks
1332 then_block = 'THEN_BLOCK'
1333 else_block = 'ELSE_BLOCK'
1334 attr = ts.TosaSerializerAttribute()
1335 attr.CondIfAttribute(then_block, else_block)
1336
1337 # Finally, build the op and the two blocks
1338 self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)
1339
1340 self.ser.startBasicBlock(then_block)
1341 # Build the actual then/else tensors inside their blocks
1342 then_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], then_arr)
1343 self.ser.addOutputTensor(then_tens)
1344
1345 self.ser.startBasicBlock(else_block)
1346 else_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], else_arr)
1347 self.ser.addOutputTensor(else_tens)
1348
1349 return result_tens
1350
1351 def build_cond_if_binary(self, op, a, b, cond):
1352 # For cond_if with a binary op in the then/else blocks, take a and b and
1353 # alternately add or subtract them based on the condition
1354
1355 # Condition tensor
1356 cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])
1357
1358 result_tens = self.ser.addOutput(a.shape, a.dtype, Usage.ACTIVATION, [])
1359 self.ser.currBasicBlock.addOutput(result_tens.name)
1360
1361 # Create the attribute with the names of the then/else blocks
1362 then_block = 'THEN_BLOCK'
1363 else_block = 'ELSE_BLOCK'
1364 attr = ts.TosaSerializerAttribute()
1365 attr.CondIfAttribute(then_block, else_block)
1366
1367 # Finally, build the op and the two blocks
1368 self.ser.addOperator(op, [cond_tens.name, a.name, b.name], [result_tens.name], attr)
1369
1370 self.ser.startBasicBlock(then_block)
1371 self.ser.addInputTensor(a)
1372 self.ser.addInputTensor(b)
1373 then_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
1374 self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])
1375
1376 self.ser.startBasicBlock(else_block)
1377 self.ser.addInputTensor(a)
1378 self.ser.addInputTensor(b)
1379 else_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
1380 self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])
1381
1382 return result_tens
1383
1384 def build_while_loop(self, op, a, iter_val):
1385 iter = self.ser.addPlaceholder([], DType.INT32, Usage.ACTIVATION, [], [np.int32(iter_val)])
1386
1387 cond_block = 'COND_BLOCK'
1388 body_block = 'BODY_BLOCK'
1389
1390 attr = ts.TosaSerializerAttribute()
1391 attr.WhileLoopAttribute(cond_block, body_block)
1392
1393 # Accumulator tensor
1394 #acc = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
1395 acc_init_val = np.int32(np.zeros(a.shape))
1396 acc = self.ser.addPlaceholder(a.shape, a.dtype, a.usage, a.dformat, acc_init_val)
1397
1398 # Intermediate/output tensors for everything going through the loop
1399 iter_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
1400 a_out = self.ser.addIntermediate(a.shape, a.dtype, a.usage, a.dformat)
1401 acc_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)
1402
1403 # While_loop operator
1404 self.ser.addOperator(op,
1405 [iter.name, a.name, acc.name],
1406 [iter_out.name, a_out.name, acc_out.name], attr)
1407
1408 # COND block (input: iter, output: cond_tens )
1409 self.ser.startBasicBlock(cond_block)
1410 self.ser.addInputTensor(iter)
1411 self.ser.addInputTensor(a)
1412 self.ser.addInputTensor(acc)
1413 zero_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(0)])
1414 cond_tens = self.ser.addOutput([], DType.BOOL, [], [])
1415 self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name],
1416 [cond_tens.name])
1417
1418 # BODY block (input: a, acc, iter, output: a, acc, iter)
1419 # Note that local intermediate tensors need to be declared here for the outputs
1420 self.ser.startBasicBlock(body_block)
1421 self.ser.addInputTensor(iter)
1422 self.ser.addInputTensor(a)
1423 self.ser.addInputTensor(acc)
1424 one_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(1)])
1425 iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
1426 acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)
1427 self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
1428 self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
1429 self.ser.addOutputTensor(iter_body_out)
1430 self.ser.addOutputTensor(a)
1431 self.ser.addOutputTensor(acc_body_out)
1432
1433 return acc_out
1434
1435
1436 def genOpTestList(self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None):
1437
1438 try:
1439 op = self.TOSA_OP_LIST[opName]
1440 except KeyError as e:
1441 raise Exception('Cannot find op with name {}'.format(opName))
1442
1443 # Initialize a new random number generator
1444 self.rng = np.random.default_rng(self.random_seed)
1445
1446 build_fcn, tgen_fcn, agen_fcn = op['build_fcn']
1447
1448 # Generate the lists of arguments
1449 rmin, rmax = op['rank']
1450
1451 # Test list consists of a tuple of:
1452 # (opName, testNameStr, dtype, shapeList, argumentsList)
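        # e.g. (illustrative) an entry might look like:
        #   ('add', 'add_3x4_i32', DType.INT32, [[3, 4], [1, 4]], [])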
1453 testList = []
1454
1455 if not shapeFilter:
1456 shapeFilter = [None]
1457
1458 for r in range(rmin, rmax + 1):
1459
1460 # Filter out the rank?
1461 if rankFilter is not None and r not in rankFilter:
1462 continue
1463
1464 for t in op['types']:
1465
1466 # Filter tests based on dtype?
1467 if dtypeFilter is not None:
1468 if t not in dtypeFilter:
1469 continue
1470
1471 # Create the placeholder and const tensors
1472 for shape in shapeFilter:
1473 # A None shape chooses a random shape of a given rank
1474
1475 # Filter out by rank
1476 if shape is not None and len(shape) != r:
1477 continue
1478
1479 self.setTargetShape(shape)
1480 shapeList = tgen_fcn(self, op, r)
1481
1482 shapeStr = self.shapeStr(shapeList[0])
1483 typeStr = self.typeStr(t)
1484
                    # Argument lists consist of tuples of the (str, []) string representation and the build function argument list
1486 argList = []
1487 if agen_fcn:
1488 argList = agen_fcn(self, opName, shapeList, t)
1489 else:
1490 argList = [('', [])]
1491
1492 for argStr, args in argList:
1493 if argStr:
1494 testStr = '{}_{}_{}_{}'.format(opName, shapeStr, typeStr, argStr)
1495 else:
1496 testStr = '{}_{}_{}'.format(opName, shapeStr, typeStr)
1497
1498 testList.append((opName, testStr, t, shapeList, args))
1499
1500 return testList
1501
1502 def serializeTest(self, opName, testStr, dtype, shapeList, testArgs):
1503 try:
1504 op = self.TOSA_OP_LIST[opName]
1505 except KeyError as e:
1506 raise Exception('Cannot find op with name {}'.format(opName))
1507
1508 # Create a serializer
1509 self.createSerializer(opName, testStr)
1510
1511 build_fcn, tgen_fcn, agen_fcn = op['build_fcn']
1512 pCount, cCount = op['operands']
1513
1514 try:
1515 qgen = op['qgen']
1516 except KeyError:
1517 qgen = None
1518
1519 # Build the random tensor operands and the test
        tens = []

        # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
1523 if op['op'] == Op.ARITHMETIC_RIGHT_SHIFT:
1524 assert pCount == 2 and cCount == 0, 'Op.ArithmeticRightShift must have 2 placeholders, 0 consts'
1525
1526 placeholders = []
1527 for idx, shape in enumerate(shapeList[:]):
1528 if idx == 1:
1529 if dtype == DType.INT8:
1530 arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
1531 elif dtype == DType.INT16:
1532 arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
1533 elif dtype == DType.INT32:
1534 arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
1535 else:
1536 raise Exception('OpArithmeticRightShift: invalid input dtype')
1537 else:
1538 arr = self.getRandTensor(shapeList[0], dtype)
1539 placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))
1540
1541 tens.extend(placeholders)
1542 else:
            tens.extend(self.buildPlaceholderTensors(shapeList[0:pCount], dtype))
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtype))

        if qgen is not None:
1547 qinfo = qgen(self, op, dtype)
1548 else:
1549 qinfo = None
1550
1551 try:
1552 if qinfo is not None:
1553 resultName = build_fcn(self, op['op'], *tens, *testArgs, qinfo)
1554 else:
1555 resultName = build_fcn(self, op['op'], *tens, *testArgs)
1556 except TypeError as e:
1557 print('build_fcn: {}\nTensors: {}\nArgs: {}\n'.format(build_fcn, tens, testArgs))
1558 raise e
1559
1560 # Save the serialized test
1561 self.serialize('test')
1562
1563 def createDynamicOpLists(self):
1564
1565 # Dynamically create op lists for convolutions with a list of kernel sizes
1566 KERNELS = [ [1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3] ]
1567
1568 for k in KERNELS:
1569 testName = 'conv2d_{}x{}'.format(k[0], k[1])
1570 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['conv2d_TEMPLATE'].copy()
1571 self.TOSA_OP_LIST[testName]['filter'] = k
1572 self.TOSA_OP_LIST[testName]['template'] = False
1573
1574 testName = 'depthwise_conv2d_{}x{}'.format(k[0], k[1])
1575 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['depthwise_conv2d_TEMPLATE'].copy()
1576 self.TOSA_OP_LIST[testName]['filter'] = k
1577 self.TOSA_OP_LIST[testName]['template'] = False
1578
1579 testName = 'transpose_conv2d_{}x{}'.format(k[0], k[1])
1580 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST['transpose_conv2d_TEMPLATE'].copy()
1581 self.TOSA_OP_LIST[testName]['filter'] = k
1582 self.TOSA_OP_LIST[testName]['template'] = False
1583
1584 # Delete any templates after having created any dynamic ops
1585 # This is a two-pass operation because it's bad practice to delete
1586 # keys from dictionaries while iterating
1587 keyList = []
1588 for k in self.TOSA_OP_LIST:
1589 try:
1590 if self.TOSA_OP_LIST[k]['template'] == True:
1591 keyList.append(k)
1592 continue
1593 except KeyError:
1594 pass
1595
1596 for k in keyList:
1597 del self.TOSA_OP_LIST[k]
1598
1599 def initOpListDefaults(self):
1600 '''Fill in default fields for ops if they aren't already specified.
1601 Look for missing required fields (datastructure linting).'''
1602 for op in self.TOSA_OP_LIST:
1603
1604 # Required fields
1605 try:
1606 pl, c = self.TOSA_OP_LIST[op]['operands']
1607 except (KeyError, ValueError, TypeError):
1608 raise Exception('Op {} is missing a valid operand tuple in TOSA_OP_LIST'.format(op))
1609
1610 try:
1611 fcn, tgen, arggen = self.TOSA_OP_LIST[op]['build_fcn']
1612 except (KeyError, ValueError, TypeError):
1613 raise Exception('Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST'.format(op))
1614
1615 try:
1616 types = self.TOSA_OP_LIST[op]['types']
1617 except KeyError as e:
1618 raise Exception('Op {} is missing a valid type list in TOSA_OP_LIST'.format(op))
1619
1620 try:
1621 opcode = self.TOSA_OP_LIST[op]['op']
1622 except KeyError as e:
1623 raise Exception('Op {} is missing the Op field in TOSA_OP_LIST'.format(op))
1624
1625 # Put in default rank range, if missing
1626 try:
1627 rank = self.TOSA_OP_LIST[op]['rank']
1628 except KeyError:
1629 self.TOSA_OP_LIST[op]['rank'] = self.DEFAULT_RANK_RANGE
1630
    # Tensor operator list
    #  'op': op name
    #  'operands': tuple of (placeholder, const) operands
    #  'rank': optional, restricts rank to tuple inclusive of (min, max),
    #          if not specified, defaults to (1, 4)
    #  'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
    #  'types': array of datatypes to be tested
    TYPE_FP = [ DType.FLOAT ]

    TYPE_INT = [ DType.INT8, DType.INT16, DType.INT32 ]                  # Excludes INT4
    TYPE_INT_FP = [ DType.INT8, DType.INT16, DType.INT32, DType.FLOAT ]  # Excludes INT4

    TYPE_BOOL = [ DType.BOOL ]
    TYPE_FI32 = [ DType.FLOAT, DType.INT32 ]
    TYPE_FIB = [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ]
    TYPE_FI16 = [ DType.FLOAT, DType.INT16 ]

    TYPE_NARROW_INT_FP = [ DType.INT8, DType.INT16, DType.FLOAT ]

    DEFAULT_RANK_RANGE = (1, 4)
1651
1652 TOSA_OP_LIST = {
1653 # Binary ops
1654 'add':
1655 { 'op': Op.ADD,
1656 'operands': (2, 0),
1657 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1658 'types': TYPE_FI32 },
1659
1660 'arithmetic_right_shift':
1661 { 'op': Op.ARITHMETIC_RIGHT_SHIFT,
1662 'operands': (2, 0),
Kevin Chengaee1fac2020-11-11 13:54:06 -08001663 'build_fcn': (build_arithmetic_right_shift, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agArithmeticRightShift),
Kevin Cheng3a478572021-01-22 17:21:02 -08001664 'types': TYPE_INT },
Eric Kunzee5e26762020-10-13 16:11:07 -07001665
1666 'bitwise_and':
1667 { 'op': Op.BITWISE_AND,
1668 'operands': (2, 0),
1669 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1670 'types': TYPE_INT },
1671
1672 'bitwise_or':
1673 { 'op': Op.BITWISE_OR,
1674 'operands': (2, 0),
1675 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1676 'types': TYPE_INT },
1677
1678 'bitwise_xor':
1679 { 'op': Op.BITWISE_XOR,
1680 'operands': (2, 0),
1681 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1682 'types': TYPE_INT },
1683
1684 'logical_and':
1685 { 'op': Op.LOGICAL_AND,
1686 'operands': (2, 0),
1687 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1688 'types': TYPE_BOOL },
1689
1690 'logical_left_shift':
1691 { 'op': Op.LOGICAL_LEFT_SHIFT,
1692 'operands': (2, 0),
1693 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
Kevin Cheng3a478572021-01-22 17:21:02 -08001694 'types': TYPE_INT },
Eric Kunzee5e26762020-10-13 16:11:07 -07001695
1696 'logical_right_shift':
1697 { 'op': Op.LOGICAL_RIGHT_SHIFT,
1698 'operands': (2, 0),
1699 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
Kevin Cheng3a478572021-01-22 17:21:02 -08001700 'types': TYPE_INT },
Eric Kunzee5e26762020-10-13 16:11:07 -07001701
1702 'logical_or':
1703 { 'op': Op.LOGICAL_OR,
1704 'operands': (2, 0),
1705 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1706 'types': TYPE_BOOL },
1707
1708 'logical_xor':
1709 { 'op': Op.LOGICAL_XOR,
1710 'operands': (2, 0),
1711 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1712 'types': TYPE_BOOL },
1713
1714 'max':
1715 { 'op': Op.MAXIMUM,
1716 'operands': (2, 0),
1717 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1718 'types': TYPE_FI32 },
1719
1720 'min':
1721 { 'op': Op.MINIMUM,
1722 'operands': (2, 0),
1723 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1724 'types': TYPE_FI32 },
1725
1726 'mul':
1727 { 'op': Op.MUL,
1728 'operands': (2, 0),
Kevin Chengaee1fac2020-11-11 13:54:06 -08001729 'build_fcn': (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
Kevin Cheng3a478572021-01-22 17:21:02 -08001730 'types': TYPE_INT_FP },
Eric Kunzee5e26762020-10-13 16:11:07 -07001731
1732 'pow':
1733 { 'op': Op.POW,
1734 'operands': (2, 0),
1735 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBasic, None),
1736 'types': TYPE_FP },
1737
1738 'sub':
1739 { 'op': Op.SUB,
1740 'operands': (2, 0),
1741 'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
1742 'types': TYPE_FI32 },
1743
1744 'table':
1745 { 'op': Op.TABLE,
1746 # Use the automatic generation functions to create the input array
1747 # but create the table tensor in the build function, as it may be
1748 # a different type from the input
1749 'operands': (1, 0),
1750 'build_fcn': (build_table, TosaTensorGen.tgBasic, None),
1751 'types': [ DType.INT16 ] },
1752
1753 'argmax':
1754 { 'op': Op.ARGMAX,
1755 'operands': (1, 0),
1756 'build_fcn': (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1757 'types': TYPE_FP },
1758
1759 # Templated operator. Filled in by createDynamicOpLists
1760 'conv2d_TEMPLATE':
1761 { 'op': Op.CONV2D,
1762 'operands': (1, 2),
1763 'rank': (4, 4),
1764 'build_fcn': (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
1765 'qgen': TosaQuantGen.qgConv,
1766 'types': TYPE_FP,
1767 'template': True },
1768
1769 # Templated operator. Filled in by createDynamicOpLists
1770 'depthwise_conv2d_TEMPLATE':
1771 { 'op': Op.DEPTHWISE_CONV2D,
1772 'operands': (1, 2),
1773 'filter': [1, 1],
1774 'rank': (4, 4),
1775 'build_fcn': (build_depthwise_conv2d, TosaTensorGen.tgDepthwiseConv2D, TosaArgGen.agConv2D),
1776 'qgen': TosaQuantGen.qgConv,
1777 'types': TYPE_FP,
1778 'template': True },
1779
1780 # Templated operator. Filled in by createDynamicOpLists
1781 'transpose_conv2d_TEMPLATE':
1782 { 'op': Op.TRANSPOSE_CONV2D,
1783 'operands': (1, 1),
1784 'rank': (4, 4),
1785 'build_fcn': (build_transpose_conv2d, TosaTensorGen.tgTransposeConv2D, TosaArgGen.agTransposeConv2D),
1786 'qgen': TosaQuantGen.qgConv,
1787 'types': TYPE_FP,
1788 'template': True },
1789
1790 'fully_connected':
1791 { 'op': Op.FULLY_CONNECTED,
1792 'operands': (2, 0),
1793 'rank': (2, 2),
1794 'build_fcn': (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
1795 'qgen': TosaQuantGen.qgConv,
1796 'types': TYPE_FP },
1797
1798 'matmul':
1799 { 'op': Op.MATMUL,
1800 'operands': (2, 0),
1801 'rank': (2, 2),
1802 'build_fcn': (build_matmul, TosaTensorGen.tgMatmul, None),
1803 'qgen': TosaQuantGen.qgMatmul,
1804 'types': TYPE_NARROW_INT_FP },
1805
1806 # Unary operators
1807 'abs':
1808 { 'op': Op.ABS,
1809 'operands': (1, 0),
1810 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1811 'types': TYPE_FI32 },
1812
1813 'bitwise_not':
1814 { 'op': Op.BITWISE_NOT,
1815 'operands': (1, 0),
1816 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1817 'types': TYPE_INT },
1818
1819 'ceil':
1820 { 'op': Op.CEIL,
1821 'operands': (1, 0),
1822 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1823 'types': TYPE_FP },
1824
1825 'clz':
1826 { 'op': Op.CLZ,
1827 'operands': (1, 0),
1828 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1829 'types': [ DType.INT32 ] },
1830
1831 'exp':
1832 { 'op': Op.EXP,
1833 'operands': (1, 0),
1834 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1835 'types': TYPE_FP },
1836
1837 'floor':
1838 { 'op': Op.FLOOR,
1839 'operands': (1, 0),
1840 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1841 'types': TYPE_FP },
1842
1843 'log':
1844 { 'op': Op.LOG,
1845 'operands': (1, 0),
1846 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1847 'types': TYPE_FP },
1848
1855 'logical_not':
1856 { 'op': Op.LOGICAL_NOT,
1857 'operands': (1, 0),
1858 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1859 'types': TYPE_BOOL },
1860
1861 'negate':
1862 { 'op': Op.NEGATE,
1863 'operands': (1, 0),
1864 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1865 'qgen': TosaQuantGen.qgUnary,
1866 'types': TYPE_INT_FP },
1867
1868 'reciprocal':
1869 { 'op': Op.RECIPROCAL,
1870 'operands': (1, 0),
1871 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1872 'types': TYPE_FP },
1873
1874 'rsqrt':
1875 { 'op': Op.RSQRT,
1876 'operands': (1, 0),
1877 'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
1878 'types': TYPE_FP },
1879
1880 # Ternary operators
1881 'select':
1882 { 'op': Op.SELECT,
1883 'operands': (3, 0),
1884 'build_fcn': (build_select, TosaTensorGen.tgBroadcastFuzz, None),
1885 'types': TYPE_FIB },
1886
1887 # Comparison operators
1888 'equal':
1889 { 'op': Op.EQUAL,
1890 'operands': (2, 0),
1891 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
1892 'types': TYPE_FI32 },
1893
1894 'greater_equal':
1895 { 'op': Op.GREATER_EQUAL,
1896 'operands': (2, 0),
1897 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
1898 'types': TYPE_FI32 },
1899
1900 'greater':
1901 { 'op': Op.GREATER,
1902 'operands': (2, 0),
1903 'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
1904 'types': TYPE_FI32 },
1905
1906 # Pooling operators
1907 'avg_pool2d':
1908 { 'op': Op.AVG_POOL2D,
1909 'operands': (1, 0),
1910 'rank': (4, 4),
1911 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
1912 'qgen': TosaQuantGen.qgUnary,
1913 'types': TYPE_NARROW_INT_FP },
1914
1915
1916 'max_pool2d':
1917 { 'op': Op.MAX_POOL2D,
1918 'operands': (1, 0),
1919 'rank': (4, 4),
1920 'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
1921 'types': TYPE_NARROW_INT_FP },
1922
1923 # Reduce operators
1924 'reduce_any':
1925 { 'op': Op.REDUCE_ANY,
1926 'operands': (1, 0),
1927 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1928 'types': TYPE_BOOL },
1929
1930 'reduce_all':
1931 { 'op': Op.REDUCE_ALL,
1932 'operands': (1, 0),
1933 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1934 'types': TYPE_BOOL },
1935
1936 'reduce_max':
1937 { 'op': Op.REDUCE_MAX,
1938 'operands': (1, 0),
1939 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1940 'types': TYPE_INT_FP },
1941
1942 'reduce_min':
1943 { 'op': Op.REDUCE_MIN,
1944 'operands': (1, 0),
1945 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1946 'types': TYPE_INT_FP },
1947
1948 'reduce_product':
1949 { 'op': Op.REDUCE_PRODUCT,
1950 'operands': (1, 0),
1951 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1952 'types': TYPE_FP },
1953
1954 'reduce_sum':
1955 { 'op': Op.REDUCE_SUM,
1956 'operands': (1, 0),
1957 'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1958 'types': TYPE_FI32 },
1959
1960 # Activation functions
1961 'clamp':
1962 { 'op': Op.CLAMP,
1963 'operands': (1, 0),
1964 'build_fcn': (build_clamp, TosaTensorGen.tgBasic, None),
1965 'types': TYPE_NARROW_INT_FP },
1966
1967 'relun':
1968 { 'op': Op.RELUN,
1969 'operands': (1, 0),
1970 'build_fcn': (build_relun, TosaTensorGen.tgBasic, None),
1971 'types': TYPE_FI32 },
1972
1973 'sigmoid':
1974 { 'op': Op.SIGMOID,
1975 'operands': (1, 0),
1976 'build_fcn': (build_sigmoid, TosaTensorGen.tgBasic, None),
1977 'types': TYPE_FP },
1978
1979 'tanh':
1980 { 'op': Op.TANH,
1981 'operands': (1, 0),
1982 'build_fcn': (build_tanh, TosaTensorGen.tgBasic, None),
1983 'types': TYPE_FP },
1984
1985 # Data layout operators
1986 'concat':
1987 { 'op': Op.CONCAT,
1988 'operands': (2, 0),
1989 'build_fcn': (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1990 'types': TYPE_FIB },
1991
1992 'pad':
1993 { 'op': Op.PAD,
1994 'operands': (1, 0),
1995 'build_fcn': (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
1996 'qgen': TosaQuantGen.qgPad,
1997 'types': TYPE_FIB },
1998
1999 'reshape':
2000 { 'op': Op.RESHAPE,
2001 'operands': (1, 0),
2002 'build_fcn': (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2003 'types': TYPE_FIB },
2004
2005 'reverse':
2006 { 'op': Op.REVERSE,
2007 'operands': (1, 0),
2008 'build_fcn': (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2009 'types': TYPE_FIB },
2010
2011 'slice':
2012 { 'op': Op.SLICE,
2013 'operands': (1, 0),
2014 'build_fcn': (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2015 'types': TYPE_FIB },
2016
2017 'tile':
2018 { 'op': Op.TILE,
2019 'operands': (1, 0),
2020 'build_fcn': (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2021 'types': TYPE_FIB },
2022
2023 'transpose':
2024 { 'op': Op.TRANSPOSE,
2025 'operands': (1, 0),
2026 'rank': (2, 4), # Do not allow transpose on rank=1
2027 'build_fcn': (build_transpose, TosaTensorGen.tgBasic, TosaArgGen.agTranspose),
2028 'types': TYPE_FIB },
2029
2030 # Scatter/Gather
2031 'gather':
2032 { 'op': Op.GATHER,
Kevin Cheng77d0f762020-11-24 10:26:32 -08002033 # Only specify 'values' tensor here. 'indices' is generated in op building stage
Eric Kunzee5e26762020-10-13 16:11:07 -07002034 'operands': (1, 0),
Kevin Cheng77d0f762020-11-24 10:26:32 -08002035 'rank': (3, 3),
2036 'build_fcn': (build_gather, TosaTensorGen.tgBasic, None),
2037 'types': TYPE_INT_FP },
Eric Kunzee5e26762020-10-13 16:11:07 -07002038
Kevin Cheng77d0f762020-11-24 10:26:32 -08002039 'scatter':
2040 { 'op': Op.SCATTER,
2041 # Only specify 'values_in' tensor here.
2042 # 'indices' and 'input' are generated in the op building stage
2043 'operands': (2, 0),
2044 'rank': (3, 3),
2045 'build_fcn': (build_scatter, TosaTensorGen.tgScatter, None),
2046 'types': TYPE_INT_FP },
Eric Kunzee5e26762020-10-13 16:11:07 -07002047
2048 # Image operations
2049 'resize':
2050 { 'op': Op.RESIZE,
2051 'operands': (1, 0),
2052 'rank': (4, 4),
2053 'build_fcn': ( build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
Kevin Cheng77d0f762020-11-24 10:26:32 -08002054 'types': [ DType.INT8, DType.INT16, DType.FLOAT ] },
Eric Kunzee5e26762020-10-13 16:11:07 -07002055
2056
2057 # Data nodes
2058 'placeholder':
2059 { 'op': Op.PLACEHOLDER,
2060 'operands': (1, 0),
2061 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None),
2062 'types': TYPE_FIB },
2063
2064 'const':
2065 { 'op': Op.CONST,
2066 'operands': (1, 0),
2067 'build_fcn': ( build_placeholder, TosaTensorGen.tgBasic, None),
2068 'types': TYPE_FIB },
2069
2070
2071 'identity':
2072 { 'op': Op.IDENTITY,
2073 'operands': (1, 0),
2074 'build_fcn': ( build_unary, TosaTensorGen.tgBasic, None),
2075 'types': TYPE_FIB },
2076
2077
2078 'identityn':
2079 { 'op': Op.IDENTITYN,
2080 'operands': (2, 0),
2081 'build_fcn': ( build_identityn, TosaTensorGen.tgBasic, None),
2082 'types': TYPE_FIB },
2083
2084 # Type conversion
2085 'cast':
2086 { 'op': Op.CAST,
2087 'operands': (1, 0),
2088 'build_fcn': ( build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast ),
2089 'types': [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ] },
2090
2091 'rescale':
2092 { 'op': Op.RESCALE,
2093 'operands': (1, 0),
2094 'build_fcn': ( build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale ),
Kevin Cheng3a478572021-01-22 17:21:02 -08002095 'types': [ DType.INT8, DType.INT16, DType.INT32, DType.INT48 ] },
Eric Kunzee5e26762020-10-13 16:11:07 -07002096
2097 # Custom
2098 # Not implemented.
2099
2100 # Control flow
2101
2102 # Two variants of cond_if: one that generates one of two constant tensors (no
2103 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
2104 # (two inputs to the basic blocks, one output)
2105 'cond_if_const':
2106 { 'op': Op.COND_IF,
2107 'operands': (0, 2),
2108 'build_fcn': ( build_cond_if_const, TosaTensorGen.tgBasic, TosaArgGen.agCondIf ),
2109 'types': [ DType.BOOL ] },
2110
2111 'cond_if_binary':
2112 { 'op': Op.COND_IF,
2113 'operands': (2, 0),
2114 'build_fcn': ( build_cond_if_binary, TosaTensorGen.tgBasic, TosaArgGen.agCondIf ),
2115 'types': TYPE_FI32 },
2116
2117 # while_loop
2118 'while_loop':
2119 { 'op': Op.WHILE_LOOP,
2120 'operands': (0, 1),
2121 'build_fcn': ( build_while_loop, TosaTensorGen.tgBasic, TosaArgGen.agWhileLoop ),
2122 'types': [DType.INT32] },
2123
2124
2125 }
2126
2127class OutputShaper:
2128 # Methods in this class compute the expected output shape and datatype
2129 # for common classes of operations
2130 def __init__(self):
2131 pass
2132
2133 # These methods return arguments that can be used for
2134 # creating a new output tensor
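 # Hypothetical usage sketch (not from a specific build function): a builder would
 # typically call e.g. OutputShaper.unaryOp(ser, input_tensor) and use the returned
 # serializer tensor as the op's output.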
2135 @staticmethod
2136 def binaryBroadcastOp(ser, a, b):
2137 assert(len(a.shape) == len(b.shape))
2138 assert(a.dtype == b.dtype)
2139
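 # Broadcast each dimension where a's size is 1 up to b's size; e.g. (illustrative
 # shapes) a.shape [1, 3, 1] with b.shape [2, 3, 4] gives an output shape of [2, 3, 4].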
2140 shape = []
2141 for i in range(len(a.shape)):
2142 if a.shape[i] == 1:
2143 shape.append(b.shape[i])
2144 else:
2145 shape.append(a.shape[i])
2146
2147 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2148
2149 @staticmethod
2150 def binaryNonBroadcastOp(ser, a, b):
2151 assert(len(a.shape) == len(b.shape))
2152 assert(a.dtype == b.dtype)
2153
2154 shape = []
2155 for i in range(len(a.shape)):
2156 assert(a.shape[i] == b.shape[i])
2157 shape.append(a.shape[i])
2158
2159 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2160
2161 @staticmethod
2162 def unaryOp(ser, a):
2163 return ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
2164
2165 @staticmethod
2166 def selectOp(ser, cond, a, b):
2167 assert(len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape))
2168 assert(a.dtype == b.dtype)
2169
2170 shape = []
2171 for i in range(len(a.shape)):
2172 shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
2173
2174 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2175
2176 @staticmethod
2177 def binaryComparisonOp(ser, a, b):
2178 assert(len(a.shape) == len(b.shape))
2179 assert(a.dtype == b.dtype)
2180
2181 # Do broadcast
2182 shape = []
2183 for i in range(len(a.shape)):
2184 if a.shape[i] == 1:
2185 shape.append(b.shape[i])
2186 else:
2187 shape.append(a.shape[i])
2188
2189 # Force the output type to bool
2190 return ser.addOutput(shape, DType.BOOL, a.usage, a.dformat)
2191
2192 @staticmethod
2193 def reduceOp(ser, a, axis):
2194
2195 shape = a.shape.copy()
2196
2197 shape[axis] = 1
2198
2199 return ser.addOutput(shape, a.dtype, a.usage, a.dformat)
2200
2201 @staticmethod
2202 def argmaxOp(ser, a, axis):
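 # The reduced axis is removed from the shape and the result is INT32 indices;
 # e.g. (illustrative) an input of shape [2, 3, 4] with axis=1 gives output shape [2, 4].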
2203 shape = a.shape.copy()
2204 del shape[axis]
2205 return ser.addOutput(shape, DType.INT32, a.usage, a.dformat)
2206
2207 @staticmethod
2208 def conv2dOp(ser, ifm, filter, strides, padding, dilations):
2209
2210 # IFM: NHWC
2211 # Filter: OHWI
2212 # OFM: NHWC
2213
2214 if len(padding) == 2:
2215 # Expand padding to 4 parameters in the case of transpose_conv2d
2216 # From H,W to T,B,L,R
2217 padding = [padding[0], padding[0], padding[1], padding[1]]
2218
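 # Standard dilated-convolution output size. With illustrative values
 # ifm [1, 8, 8, 4] (NHWC), filter [8, 3, 3, 4] (OHWI), strides [1, 1],
 # padding [0, 0, 0, 0] and dilations [1, 1]:
 #   h = w = (8 - 3 - (3 - 1) * 0 + 0 + 0) // 1 + 1 = 6, giving an OFM of [1, 6, 6, 8].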
2219 h = (ifm.shape[1] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[0] - 1) + \
2220 padding[0] + padding[1]) // strides[0] + 1
2221
2222 w = (ifm.shape[2] - filter.shape[2] - (filter.shape[2] - 1) * (dilations[1] - 1) + \
2223 padding[2] + padding[3]) // strides[1] + 1
2224
2225 if h <= 0 or w <= 0:
2226 # Invalid test parameters?
2227 h = 0
2228 w = 0
2229 ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')
2230
2231 ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]
2232
Kevin Cheng3a478572021-01-22 17:21:02 -08002233 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002234 out_dtype = DType.INT32
2235 elif ifm.dtype == DType.INT16:
2236 out_dtype = DType.INT48
2237 elif ifm.dtype == DType.FLOAT:
2238 out_dtype = DType.FLOAT
2239 else:
2240 raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
2241
2242 return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)
2243
2244 @staticmethod
2245 def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
2246 # IFM: NHWC
2247 # Filter: HWCM
2248 # OFM: NHW C*M
2249 h = (ifm.shape[1] - filter.shape[0] - (filter.shape[0] - 1) * (dilations[0] - 1) + \
2250 padding[0] + padding[1]) // strides[0] + 1
2251
2252 w = (ifm.shape[2] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[1] - 1) + \
2253 padding[2] + padding[3]) // strides[1] + 1
2254
2255 if h <= 0 or w <= 0:
2256 # Invalid test parameters?
2257 h = 0
2258 w = 0
2259 ser.setExpectedFailure(True, 'Invalid combination of depthwise_conv2d parameters')
2260
2261 ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]
2262
Kevin Cheng3a478572021-01-22 17:21:02 -08002263 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002264 out_dtype = DType.INT32
2265 elif ifm.dtype == DType.INT16:
2266 out_dtype = DType.INT48
2267 elif ifm.dtype == DType.FLOAT:
2268 out_dtype = DType.FLOAT
2269 else:
2270 raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
2271
2272 return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)
2273
2274
2275 @staticmethod
2276 def pool2dOp(ser, ifm, kernel, stride, pad):
2277 # input: NHWC
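 # Illustrative values: ifm [1, 8, 8, 3], kernel [2, 2], stride [2, 2], pad [0, 0, 0, 0]
 # give h = w = (8 + 0 + 0 + 2 - 2) // 2 = 4, so the output shape is [1, 4, 4, 3].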
2278 h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
2279 w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
2280
2281 if h <= 0 or w <= 0:
2282 # Invalid test parameters?
2283 h = 0
2284 w = 0
2285 ser.setExpectedFailure(True, 'Invalid combination of pooling parameters')
2286
2287 ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
2288 return ser.addOutput(ofm_shape, ifm.dtype, ifm.usage, ifm.dformat)
2289
2290 @staticmethod
2291 def fullyConnectedOp(ser, input, filter):
2292 # input: N, IC
2293 # filter: OC, IC
2294 # output: N, OC
2295
2296 output_shape = [input.shape[0], filter.shape[0]]
2297
Kevin Cheng3a478572021-01-22 17:21:02 -08002298 if input.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002299 out_dtype = DType.INT32
2300 elif input.dtype == DType.INT16:
2301 out_dtype = DType.INT48
2302 elif input.dtype == DType.FLOAT:
2303 out_dtype = DType.FLOAT
2304 else:
2305 raise Exception('Unsupported input dtype: {}'.format(input.dtype))
2306
2307 return ser.addOutput(output_shape, out_dtype, input.usage, input.dformat)
2308
2309 @staticmethod
2310 def matmulOp(ser, a, b):
2311 # a: M, K
2312 # b: K, N
2313 # out: M, N
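 # e.g. (illustrative) a.shape [4, 8] with b.shape [8, 5] gives an output shape of [4, 5].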
2314
2315 output_shape = [a.shape[0], b.shape[1]]
2316
Kevin Cheng3a478572021-01-22 17:21:02 -08002317 if a.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002318 out_dtype = DType.INT32
2319 elif a.dtype == DType.INT16:
2320 out_dtype = DType.INT48
2321 elif a.dtype == DType.FLOAT:
2322 out_dtype = DType.FLOAT
2323 else:
2324 raise Exception('Unsupported input dtype for matmul: {}'.format(a.dtype))
2325
2326 return ser.addOutput(output_shape, out_dtype, a.usage, a.dformat)
2327
2328 @staticmethod
2329 def concatOp(ser, a, b, axis):
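 # The two inputs are joined along 'axis'; e.g. (illustrative) a.shape [2, 3] and
 # b.shape [2, 5] concatenated on axis=1 give an output shape of [2, 8].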
2330
2331 output_shape = a.shape.copy()
2332 output_shape[axis] = a.shape[axis] + b.shape[axis]
2333
2334 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2335
2336 @staticmethod
2337 def padOp(ser, a, padding):
2338
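 # Each dimension grows by its before/after padding; e.g. (illustrative) a.shape [2, 3]
 # with padding [[1, 1], [0, 2]] gives an output shape of [4, 5].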
2339 output_shape = a.shape.copy()
2340
2341 for i in range(len(output_shape)):
2342 output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
2343
2344 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2345
2346 @staticmethod
2347 def reshapeOp(ser, a, shape):
2348 output_shape = shape.copy()
2349
2350 totalElements = 1
2351 for i in a.shape:
2352 totalElements *= i
2353
2354 # If there are any -1 elements, figure out what that dimension must be
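 # e.g. (illustrative) an input of shape [2, 3, 4] (24 elements) reshaped with
 # shape [4, -1] gives an output shape of [4, 6].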
2355 totalOutputElements = 1
2356 for i in output_shape:
2357 if i != -1:
2358 totalOutputElements *= i
2359
2360 # And fill it in
2361 for i in range(len(output_shape)):
2362 if output_shape[i] == -1:
2363 output_shape[i] = totalElements // totalOutputElements
2364
2365 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2366
2367 @staticmethod
2368 def sliceOp(ser, a, begin, size):
2369
2370 output_shape = size.copy()
2371 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2372
2373 @staticmethod
2374 def tileOp(ser, a, multiples):
2375
2376 output_shape = a.shape.copy()
2377 assert(len(multiples) == len(output_shape))
2378
2379 for i in range(len(output_shape)):
2380 output_shape[i] = a.shape[i] * multiples[i]
2381
2382 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2383
2384 @staticmethod
2385 def transposeOp(ser, a, perms):
2386 output_shape = a.shape.copy()
2387 assert(len(perms) == len(output_shape))
2388
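 # Output dimension i takes its size from input dimension perms[i]; e.g. (illustrative)
 # a.shape [1, 2, 3] with perms [2, 0, 1] gives an output shape of [3, 1, 2].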
2389 for i in range(len(output_shape)):
2390 output_shape[i] = a.shape[perms[i]]
2391
2392 return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)
2393
2394 @staticmethod
Kevin Cheng77d0f762020-11-24 10:26:32 -08002395 def gatherOp(ser, values, indices):
2396 assert len(values.shape) == 3
2397 assert len(indices.shape) == 2
2398 assert values.shape[0] == indices.shape[0]
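 # Output is [N, W, C]; e.g. (illustrative) values [2, 5, 3] with indices [2, 7]
 # gives an output shape of [2, 7, 3].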
Eric Kunzee5e26762020-10-13 16:11:07 -07002399
Kevin Cheng77d0f762020-11-24 10:26:32 -08002400 output_shape = [values.shape[0], indices.shape[1], values.shape[2]]
2401
2402 return ser.addOutput(output_shape, values.dtype, values.usage, values.dformat)
2403
2404 @staticmethod
2405 def scatterOp(ser, values_in, indices, input):
2406 assert len(values_in.shape) == 3
2407 assert len(indices.shape) == 2
2408 assert len(input.shape) == 3
2409 assert values_in.shape[0] == indices.shape[0] # N
2410 assert input.shape[1] == indices.shape[1] # W
2411 assert values_in.shape[2] == input.shape[2] # C
2412
2413 output_shape = values_in.shape
2414
2415 return ser.addOutput(output_shape, values_in.dtype, values_in.usage, values_in.dformat)
Eric Kunzee5e26762020-10-13 16:11:07 -07002416
2417 @staticmethod
2418 def tableOp(ser, input, table):
2419 # Same shape as the input; the generated tests only use INT16 inputs, which produce INT32 outputs.
2420 return ser.addOutput(input.shape, DType.INT32, input.usage, input.dformat)
2421
2422 @staticmethod
Kevin Cheng77d0f762020-11-24 10:26:32 -08002423 def resizeOp(ser, input, mode, stride, offset, shift, stride_fp, offset_fp, output_dims, input_dtype, output_dtype):
Eric Kunzee5e26762020-10-13 16:11:07 -07002424
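 # Dtype combinations accepted by the checks below: BILINEAR maps INT8->INT32,
 # INT16->INT48 and FLOAT->FLOAT; NEAREST maps INT8->INT8, INT16->INT16 and
 # FLOAT->FLOAT. Anything else is flagged as an expected failure.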
2425 output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
2426
Kevin Cheng77d0f762020-11-24 10:26:32 -08002427 if input_dtype == DType.FLOAT:
2428 if stride_fp[0] <= 0 or stride_fp[1] <= 0:
2429 ser.setExpectedFailure(True, 'Negative or zero stride')
2430 else:
2431 if stride[0] <= 0 or stride[1] <= 0:
2432 ser.setExpectedFailure(True, 'Negative or zero stride')
Eric Kunzee5e26762020-10-13 16:11:07 -07002433
Kevin Chengaee1fac2020-11-11 13:54:06 -08002434 if mode == ResizeMode.BILINEAR:
2435 if input_dtype == DType.INT8:
2436 if output_dtype != DType.INT32:
2437 ser.setExpectedFailure(True, 'Invalid output data type')
2438 elif input_dtype == DType.INT16:
2439 if output_dtype != DType.INT48:
2440 ser.setExpectedFailure(True, 'Invalid output data type')
Kevin Cheng77d0f762020-11-24 10:26:32 -08002441 elif input_dtype == DType.FLOAT:
2442 if output_dtype != DType.FLOAT:
2443 ser.setExpectedFailure(True, 'Invalid output data type')
Kevin Chengaee1fac2020-11-11 13:54:06 -08002444 else:
2445 ser.setExpectedFailure(True, 'Invalid input data type')
2446
2447 elif mode == ResizeMode.NEAREST:
2448 if input_dtype == DType.INT8:
2449 if output_dtype != DType.INT8:
2450 ser.setExpectedFailure(True, 'Invalid output data type')
2451 elif input_dtype == DType.INT16:
2452 if output_dtype != DType.INT16:
2453 ser.setExpectedFailure(True, 'Invalid output data type')
Kevin Cheng77d0f762020-11-24 10:26:32 -08002454 elif input_dtype == DType.FLOAT:
2455 if output_dtype != DType.FLOAT:
2456 ser.setExpectedFailure(True, 'Invalid output data type')
Kevin Chengaee1fac2020-11-11 13:54:06 -08002457 else:
2458 ser.setExpectedFailure(True, 'Invalid input data type')
2459
2460 else:
2461 ser.setExpectedFailure(True, 'Invalid resize mode')
2462
Eric Kunzee5e26762020-10-13 16:11:07 -07002463 return ser.addOutput(output_dims, output_dtype, input.usage, input.dformat)
2464
2465 @staticmethod
2466 def typeConversionOp(ser, val, out_dtype):
2467 return ser.addOutput(val.shape, out_dtype, val.usage, val.dformat)
2468
2469 @staticmethod
2470 def transposeConv2DOp(ser, ifm, output_shape):
Kevin Cheng3a478572021-01-22 17:21:02 -08002471 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002472 out_dtype = DType.INT32
2473 elif ifm.dtype == DType.INT16:
2474 out_dtype = DType.INT48
2475 elif ifm.dtype == DType.FLOAT:
2476 out_dtype = DType.FLOAT
2477 else:
2478 raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))
2479
2480 if output_shape[1] <= 0 or output_shape[2] <= 0:
2481 ser.setExpectedFailure(True, 'Negative output shape')
2482
2483 return ser.addOutput(output_shape, out_dtype, ifm.usage, ifm.dformat)